ixl_pf_main.c (e706512a → 402810d3)
1/******************************************************************************
2
3 Copyright (c) 2013-2018, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD$*/
34
35
36#include "ixl_pf.h"
37
38#ifdef PCI_IOV
39#include "ixl_pf_iov.h"
40#endif
41
42#ifdef IXL_IW
43#include "ixl_iw.h"
44#include "ixl_iw_int.h"
45#endif
46
47static u8 ixl_convert_sysctl_aq_link_speed(u8, bool);
48static void ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
49static const char * ixl_link_speed_string(enum i40e_aq_link_speed);
50static u_int ixl_add_maddr(void *, struct sockaddr_dl *, u_int);
51static u_int ixl_match_maddr(void *, struct sockaddr_dl *, u_int);
52static char * ixl_switch_element_string(struct sbuf *, u8, u16);
53static enum ixl_fw_mode ixl_get_fw_mode(struct ixl_pf *);
54
55/* Sysctls */
56static int ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
57static int ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
58static int ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
59static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
60static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
61static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
62static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
63
64static int ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
65static int ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS);
66
67/* Debug Sysctls */
68static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
69static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
70static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
71static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
72static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
73static int ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS);
74static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
75static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
76static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
77static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
78static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
79static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
80static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
81static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
82static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
83static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
84static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
85static int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
86static int ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
87static int ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
88
89/* Debug Sysctls */
90static int ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
91static int ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
92static int ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
93static int ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
94#ifdef IXL_DEBUG
95static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
96static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
97#endif
98
99#ifdef IXL_IW
100extern int ixl_enable_iwarp;
101extern int ixl_limit_iwarp_msix;
102#endif
103
104static const char * const ixl_fc_string[6] = {
105 "None",
106 "Rx",
107 "Tx",
108 "Full",
109 "Priority",
110 "Default"
111};
112
113static char *ixl_fec_string[3] = {
114 "CL108 RS-FEC",
115 "CL74 FC-FEC/BASE-R",
116 "None"
117};
118
119MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
120
121/*
122** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
123*/
124void
125ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
126{
127 u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
128 u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
129 u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
130
131 sbuf_printf(buf,
132 "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
133 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
134 hw->aq.api_maj_ver, hw->aq.api_min_ver,
135 (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
136 IXL_NVM_VERSION_HI_SHIFT,
137 (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
138 IXL_NVM_VERSION_LO_SHIFT,
139 hw->nvm.eetrack,
140 oem_ver, oem_build, oem_patch);
141}
142
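/*
** Print the NVM version string to the kernel message buffer
** using a temporary auto-extending sbuf.
*/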
143void
144ixl_print_nvm_version(struct ixl_pf *pf)
145{
146 struct i40e_hw *hw = &pf->hw;
147 device_t dev = pf->dev;
148 struct sbuf *sbuf;
149
150 sbuf = sbuf_new_auto();
151 ixl_nvm_version_str(hw, sbuf);
152 sbuf_finish(sbuf);
153 device_printf(dev, "%s\n", sbuf_data(sbuf));
154 sbuf_delete(sbuf);
155}
156
157/**
158 * ixl_get_fw_mode - Check the state of FW
 159 * @pf: PF structure
160 *
161 * Identify state of FW. It might be in a recovery mode
162 * which limits functionality and requires special handling
163 * from the driver.
164 *
165 * @returns FW mode (normal, recovery, unexpected EMP reset)
166 */
167static enum ixl_fw_mode
168ixl_get_fw_mode(struct ixl_pf *pf)
169{
170 struct i40e_hw *hw = &pf->hw;
171 enum ixl_fw_mode fw_mode = IXL_FW_MODE_NORMAL;
172 u32 fwsts;
173
174#ifdef IXL_DEBUG
175 if (pf->recovery_mode)
176 return IXL_FW_MODE_RECOVERY;
177#endif
178 fwsts = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;
179
 180	/* FWS1B field is set and has one of the expected values */
181 if ((fwsts >= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK &&
182 fwsts <= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK) ||
183 fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
184 fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK)
185 fw_mode = IXL_FW_MODE_RECOVERY;
186 else {
187 if (fwsts > I40E_GL_FWSTS_FWS1B_EMPR_0 &&
188 fwsts <= I40E_GL_FWSTS_FWS1B_EMPR_10)
189 fw_mode = IXL_FW_MODE_UEMPR;
190 }
191 return (fw_mode);
192}
193
194/**
195 * ixl_pf_reset - Reset the PF
196 * @pf: PF structure
197 *
198 * Ensure that FW is in the right state and do the reset
199 * if needed.
200 *
201 * @returns zero on success, or an error code on failure.
202 */
203int
204ixl_pf_reset(struct ixl_pf *pf)
205{
206 struct i40e_hw *hw = &pf->hw;
207 enum i40e_status_code status;
208 enum ixl_fw_mode fw_mode;
209
210 fw_mode = ixl_get_fw_mode(pf);
211 ixl_dbg_info(pf, "%s: before PF reset FW mode: 0x%08x\n", __func__, fw_mode);
212 if (fw_mode == IXL_FW_MODE_RECOVERY) {
213 atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
214 /* Don't try to reset device if it's in recovery mode */
215 return (0);
216 }
217
218 status = i40e_pf_reset(hw);
219 if (status == I40E_SUCCESS)
220 return (0);
221
222 /* Check FW mode again in case it has changed while
223 * waiting for reset to complete */
224 fw_mode = ixl_get_fw_mode(pf);
225 ixl_dbg_info(pf, "%s: after PF reset FW mode: 0x%08x\n", __func__, fw_mode);
226 if (fw_mode == IXL_FW_MODE_RECOVERY) {
227 atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
228 return (0);
229 }
230
231 if (fw_mode == IXL_FW_MODE_UEMPR)
232 device_printf(pf->dev,
233 "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
234 else
235 device_printf(pf->dev, "PF reset failure %s\n",
236 i40e_stat_str(hw, status));
237 return (EIO);
238}
239
240/**
241 * ixl_setup_hmc - Setup LAN Host Memory Cache
242 * @pf: PF structure
243 *
244 * Init and configure LAN Host Memory Cache
245 *
246 * @returns 0 on success, EIO on error
247 */
248int
249ixl_setup_hmc(struct ixl_pf *pf)
250{
251 struct i40e_hw *hw = &pf->hw;
252 enum i40e_status_code status;
253
254 status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
255 hw->func_caps.num_rx_qp, 0, 0);
256 if (status) {
257 device_printf(pf->dev, "init_lan_hmc failed: %s\n",
258 i40e_stat_str(hw, status));
259 return (EIO);
260 }
261
262 status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
263 if (status) {
264 device_printf(pf->dev, "configure_lan_hmc failed: %s\n",
265 i40e_stat_str(hw, status));
266 return (EIO);
267 }
268
269 return (0);
270}
271
272/**
273 * ixl_shutdown_hmc - Shutdown LAN Host Memory Cache
274 * @pf: PF structure
275 *
276 * Shutdown Host Memory Cache if configured.
277 *
278 */
279void
280ixl_shutdown_hmc(struct ixl_pf *pf)
281{
282 struct i40e_hw *hw = &pf->hw;
283 enum i40e_status_code status;
284
285 /* HMC not configured, no need to shutdown */
286 if (hw->hmc.hmc_obj == NULL)
287 return;
288
289 status = i40e_shutdown_lan_hmc(hw);
290 if (status)
291 device_printf(pf->dev,
292 "Shutdown LAN HMC failed with code %s\n",
293 i40e_stat_str(hw, status));
294}
295/*
296 * Write PF ITR values to queue ITR registers.
297 */
298void
299ixl_configure_itr(struct ixl_pf *pf)
300{
301 ixl_configure_tx_itr(pf);
302 ixl_configure_rx_itr(pf);
303}
304
305/*********************************************************************
306 *
307 * Get the hardware capabilities
308 *
309 **********************************************************************/
310
311int
312ixl_get_hw_capabilities(struct ixl_pf *pf)
313{
314 struct i40e_aqc_list_capabilities_element_resp *buf;
315 struct i40e_hw *hw = &pf->hw;
316 device_t dev = pf->dev;
317 enum i40e_status_code status;
318 int len, i2c_intfc_num;
319 bool again = TRUE;
320 u16 needed;
321
322 if (IXL_PF_IN_RECOVERY_MODE(pf)) {
323 hw->func_caps.iwarp = 0;
324 return (0);
325 }
326
327 len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
328retry:
329 if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
330 malloc(len, M_IXL, M_NOWAIT | M_ZERO))) {
331 device_printf(dev, "Unable to allocate cap memory\n");
332 return (ENOMEM);
333 }
334
335 /* This populates the hw struct */
336 status = i40e_aq_discover_capabilities(hw, buf, len,
337 &needed, i40e_aqc_opc_list_func_capabilities, NULL);
338 free(buf, M_IXL);
339 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
340 (again == TRUE)) {
341 /* retry once with a larger buffer */
342 again = FALSE;
343 len = needed;
344 goto retry;
345 } else if (status != I40E_SUCCESS) {
346 device_printf(dev, "capability discovery failed; status %s, error %s\n",
347 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
348 return (ENODEV);
349 }
350
351 /*
352 * Some devices have both MDIO and I2C; since this isn't reported
353 * by the FW, check registers to see if an I2C interface exists.
354 */
355 i2c_intfc_num = ixl_find_i2c_interface(pf);
356 if (i2c_intfc_num != -1)
357 pf->has_i2c = true;
358
359 /* Determine functions to use for driver I2C accesses */
360 switch (pf->i2c_access_method) {
361 case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: {
362 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
363 pf->read_i2c_byte = ixl_read_i2c_byte_aq;
364 pf->write_i2c_byte = ixl_write_i2c_byte_aq;
365 } else {
366 pf->read_i2c_byte = ixl_read_i2c_byte_reg;
367 pf->write_i2c_byte = ixl_write_i2c_byte_reg;
368 }
369 break;
370 }
371 case IXL_I2C_ACCESS_METHOD_AQ:
372 pf->read_i2c_byte = ixl_read_i2c_byte_aq;
373 pf->write_i2c_byte = ixl_write_i2c_byte_aq;
374 break;
375 case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD:
376 pf->read_i2c_byte = ixl_read_i2c_byte_reg;
377 pf->write_i2c_byte = ixl_write_i2c_byte_reg;
378 break;
379 case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS:
380 pf->read_i2c_byte = ixl_read_i2c_byte_bb;
381 pf->write_i2c_byte = ixl_write_i2c_byte_bb;
382 break;
383 default:
384 /* Should not happen */
385 device_printf(dev, "Error setting I2C access functions\n");
386 break;
387 }
388
389 /* Keep link active by default */
390 atomic_set_32(&pf->state, IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);
391
392 /* Print a subset of the capability information. */
393 device_printf(dev,
394 "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
395 hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
396 hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
397 (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
398 (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
399 (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
400 "MDIO shared");
401
402 return (0);
403}
404
405/* For the set_advertise sysctl */
406void
407ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
408{
409 device_t dev = pf->dev;
410 int err;
411
412 /* Make sure to initialize the device to the complete list of
413 * supported speeds on driver load, to ensure unloading and
414 * reloading the driver will restore this value.
415 */
416 err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
417 if (err) {
418 /* Non-fatal error */
419 device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
420 __func__, err);
421 return;
422 }
423
424 pf->advertised_speed =
425 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
426}
427
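/*
** Tear down HW structures set up during attach: shut down the LAN HMC
** and the admin queue, then release the PF's queue manager allocation.
*/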
428int
429ixl_teardown_hw_structs(struct ixl_pf *pf)
430{
431 enum i40e_status_code status = 0;
432 struct i40e_hw *hw = &pf->hw;
433 device_t dev = pf->dev;
434
435 /* Shutdown LAN HMC */
436 if (hw->hmc.hmc_obj) {
437 status = i40e_shutdown_lan_hmc(hw);
438 if (status) {
439 device_printf(dev,
440 "init: LAN HMC shutdown failure; status %s\n",
441 i40e_stat_str(hw, status));
442 goto err_out;
443 }
444 }
445
446 /* Shutdown admin queue */
447 ixl_disable_intr0(hw);
448 status = i40e_shutdown_adminq(hw);
449 if (status)
450 device_printf(dev,
451 "init: Admin Queue shutdown failure; status %s\n",
452 i40e_stat_str(hw, status));
453
454 ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
455err_out:
456 return (status);
457}
458
459/*
460** Creates new filter with given MAC address and VLAN ID
461*/
462static struct ixl_mac_filter *
463ixl_new_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
464{
465 struct ixl_mac_filter *f;
466
467 /* create a new empty filter */
468 f = malloc(sizeof(struct ixl_mac_filter),
469 M_IXL, M_NOWAIT | M_ZERO);
470 if (f) {
471 LIST_INSERT_HEAD(headp, f, ftle);
472 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
473 f->vlan = vlan;
474 }
475
476 return (f);
477}
478
479/**
480 * ixl_free_filters - Free all filters in given list
 481 * @headp: pointer to list head
482 *
483 * Frees memory used by each entry in the list.
484 * Does not remove filters from HW.
485 */
486void
487ixl_free_filters(struct ixl_ftl_head *headp)
488{
489 struct ixl_mac_filter *f, *nf;
490
491 f = LIST_FIRST(headp);
492 while (f != NULL) {
493 nf = LIST_NEXT(f, ftle);
494 free(f, M_IXL);
495 f = nf;
496 }
497
498 LIST_INIT(headp);
499}
500
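/*
** if_foreach_llmaddr() callback used by ixl_add_multi(): stage a new
** MC filter for the given link-level address unless one already exists.
** Returns 1 if a filter was added to the to_add list, 0 otherwise.
*/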
501static u_int
502ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
503{
504 struct ixl_add_maddr_arg *ama = arg;
505 struct ixl_vsi *vsi = ama->vsi;
506 const u8 *macaddr = (u8*)LLADDR(sdl);
507 struct ixl_mac_filter *f;
508
509 /* Does one already exist */
510 f = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
511 if (f != NULL)
512 return (0);
513
514 f = ixl_new_filter(&ama->to_add, macaddr, IXL_VLAN_ANY);
515 if (f == NULL) {
516 device_printf(vsi->dev, "WARNING: no filter available!!\n");
517 return (0);
518 }
519 f->flags |= IXL_FILTER_MC;
520
521 return (1);
522}
523
524/*********************************************************************
525 * Filter Routines
526 *
527 * Routines for multicast and vlan filter management.
528 *
529 *********************************************************************/
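/*
** Sync HW multicast filters with the ifnet's multicast address list.
** If the address count reaches the HW limit, fall back to multicast
** promiscuous mode instead of programming individual filters.
*/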
530void
531ixl_add_multi(struct ixl_vsi *vsi)
532{
 533	if_t ifp = vsi->ifp;	/* e706512a: struct ifnet *ifp */
534 struct i40e_hw *hw = vsi->hw;
535 int mcnt = 0;
536 struct ixl_add_maddr_arg cb_arg;
537
538 IOCTL_DEBUGOUT("ixl_add_multi: begin");
539
540 mcnt = if_llmaddr_count(ifp);
541 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
542 i40e_aq_set_vsi_multicast_promiscuous(hw,
543 vsi->seid, TRUE, NULL);
544 /* delete all existing MC filters */
545 ixl_del_multi(vsi, true);
546 return;
547 }
548
549 cb_arg.vsi = vsi;
550 LIST_INIT(&cb_arg.to_add);
551
552 mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, &cb_arg);
553 if (mcnt > 0)
554 ixl_add_hw_filters(vsi, &cb_arg.to_add, mcnt);
555
556 IOCTL_DEBUGOUT("ixl_add_multi: end");
557}
558
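/*
** if_foreach_llmaddr() callback used by ixl_del_multi(): returns 1 when
** the given link-level address matches the filter's MAC address.
*/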
559static u_int
560ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
561{
562 struct ixl_mac_filter *f = arg;
563
564 if (ixl_ether_is_equal(f->macaddr, (u8 *)LLADDR(sdl)))
565 return (1);
566 else
567 return (0);
568}
569
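/*
** Remove MC filters that no longer appear in the ifnet's multicast
** address list, or every MC filter when 'all' is true.
*/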
570void
571ixl_del_multi(struct ixl_vsi *vsi, bool all)
572{
573 struct ixl_ftl_head to_del;
 574	if_t ifp = vsi->ifp;	/* e706512a: struct ifnet *ifp */
575 struct ixl_mac_filter *f, *fn;
576 int mcnt = 0;
577
578 IOCTL_DEBUGOUT("ixl_del_multi: begin");
579
580 LIST_INIT(&to_del);
581 /* Search for removed multicast addresses */
582 LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, fn) {
583 if ((f->flags & IXL_FILTER_MC) == 0 ||
584 (!all && (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0)))
585 continue;
586
587 LIST_REMOVE(f, ftle);
588 LIST_INSERT_HEAD(&to_del, f, ftle);
589 mcnt++;
590 }
591
592 if (mcnt > 0)
593 ixl_del_hw_filters(vsi, &to_del, mcnt);
594}
595
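/*
** Log a link-up message with speed, duplex, requested and negotiated
** FEC, autonegotiation status, and flow control configuration.
*/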
596void
597ixl_link_up_msg(struct ixl_pf *pf)
598{
599 struct i40e_hw *hw = &pf->hw;
 600	if_t ifp = pf->vsi.ifp;	/* e706512a: struct ifnet *ifp */
601 char *req_fec_string, *neg_fec_string;
602 u8 fec_abilities;
603
604 fec_abilities = hw->phy.link_info.req_fec_info;
605 /* If both RS and KR are requested, only show RS */
606 if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
607 req_fec_string = ixl_fec_string[0];
608 else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
609 req_fec_string = ixl_fec_string[1];
610 else
611 req_fec_string = ixl_fec_string[2];
612
613 if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
614 neg_fec_string = ixl_fec_string[0];
615 else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
616 neg_fec_string = ixl_fec_string[1];
617 else
618 neg_fec_string = ixl_fec_string[2];
619
620 log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
 621	    if_name(ifp),	/* e706512a: ifp->if_xname */
622 ixl_link_speed_string(hw->phy.link_info.link_speed),
623 req_fec_string, neg_fec_string,
624 (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
625 (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
626 hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
627 ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
628 ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
629 ixl_fc_string[1] : ixl_fc_string[0]);
630}
631
632/*
633 * Configure admin queue/misc interrupt cause registers in hardware.
634 */
635void
636ixl_configure_intr0_msix(struct ixl_pf *pf)
637{
638 struct i40e_hw *hw = &pf->hw;
639 u32 reg;
640
641 /* First set up the adminq - vector 0 */
642 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
643 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
644
645 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
646 I40E_PFINT_ICR0_ENA_GRST_MASK |
647 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
648 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
649 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
650 I40E_PFINT_ICR0_ENA_VFLR_MASK |
651 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
652 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
653 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
654
655 /*
656 * 0x7FF is the end of the queue list.
657 * This means we won't use MSI-X vector 0 for a queue interrupt
658 * in MSI-X mode.
659 */
660 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
661 /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
662 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
663
664 wr32(hw, I40E_PFINT_DYN_CTL0,
665 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
666 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
667
668 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
669}
670
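/*
** Add an ifmedia entry for each media type present in the PHY type
** capability bitmask reported by the hardware.
*/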
671void
672ixl_add_ifmedia(struct ifmedia *media, u64 phy_types)
673{
674 /* Display supported media types */
675 if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
676 ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL);
677
678 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
679 ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL);
680 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
681 ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL);
682 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
683 ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL);
684
685 if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T))
686 ifmedia_add(media, IFM_ETHER | IFM_2500_T, 0, NULL);
687
688 if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T))
689 ifmedia_add(media, IFM_ETHER | IFM_5000_T, 0, NULL);
690
691 if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
692 phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
693 phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
694 ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
695
696 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
697 ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL);
698 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
699 ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL);
700 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
701 ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL);
702
703 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
704 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
705 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
706 phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
707 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
708 ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
709 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
710 ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
711 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
712 ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
713
714 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
715 ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL);
716
717 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
718 || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
719 ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
720 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
721 ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
722 if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
723 ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
724 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
725 ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
726 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
727 ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL);
728
729 if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
730 ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
731
732 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
733 ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
734 if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
735 ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
736
737 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
738 ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL);
739 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
740 ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL);
741 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
742 ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL);
743 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
744 ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL);
745 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
746 ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
747 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
748 ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
749}
750
751/*********************************************************************
752 *
753 * Get Firmware Switch configuration
754 * - this will need to be more robust when more complex
755 * switch configurations are enabled.
756 *
757 **********************************************************************/
758int
759ixl_switch_config(struct ixl_pf *pf)
760{
761 struct i40e_hw *hw = &pf->hw;
762 struct ixl_vsi *vsi = &pf->vsi;
763 device_t dev = iflib_get_dev(vsi->ctx);
764 struct i40e_aqc_get_switch_config_resp *sw_config;
765 u8 aq_buf[I40E_AQ_LARGE_BUF];
766 int ret;
767 u16 next = 0;
768
769 memset(&aq_buf, 0, sizeof(aq_buf));
770 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
771 ret = i40e_aq_get_switch_config(hw, sw_config,
772 sizeof(aq_buf), &next, NULL);
773 if (ret) {
774 device_printf(dev, "aq_get_switch_config() failed, error %d,"
775 " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
776 return (ret);
777 }
778 if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
779 device_printf(dev,
780 "Switch config: header reported: %d in structure, %d total\n",
781 LE16_TO_CPU(sw_config->header.num_reported),
782 LE16_TO_CPU(sw_config->header.num_total));
783 for (int i = 0;
784 i < LE16_TO_CPU(sw_config->header.num_reported); i++) {
785 device_printf(dev,
786 "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
787 sw_config->element[i].element_type,
788 LE16_TO_CPU(sw_config->element[i].seid),
789 LE16_TO_CPU(sw_config->element[i].uplink_seid),
790 LE16_TO_CPU(sw_config->element[i].downlink_seid));
791 }
792 }
793 /* Simplified due to a single VSI */
794 vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid);
795 vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid);
796 vsi->seid = LE16_TO_CPU(sw_config->element[0].seid);
797 return (ret);
798}
799
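/*
** Create the sysctl node for a VSI and attach its Ethernet statistics;
** optionally attach per-queue statistics as well.
*/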
800void
801ixl_vsi_add_sysctls(struct ixl_vsi * vsi, const char * sysctl_name, bool queues_sysctls)
802{
803 struct sysctl_oid *tree;
804 struct sysctl_oid_list *child;
805 struct sysctl_oid_list *vsi_list;
806
807 tree = device_get_sysctl_tree(vsi->dev);
808 child = SYSCTL_CHILDREN(tree);
809 vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
810 CTLFLAG_RD, NULL, "VSI Number");
811
812 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
813 ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);
814
815 /* Copy of netstat RX errors counter for validation purposes */
816 SYSCTL_ADD_UQUAD(&vsi->sysctl_ctx, vsi_list, OID_AUTO, "rx_errors",
817 CTLFLAG_RD, &vsi->ierrors,
818 "RX packet errors");
819
820 if (queues_sysctls)
821 ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx);
822}
823
824/*
825 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
826 * Writes to the ITR registers immediately.
827 */
828static int
829ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
830{
831 struct ixl_pf *pf = (struct ixl_pf *)arg1;
832 device_t dev = pf->dev;
833 int error = 0;
834 int requested_tx_itr;
835
836 requested_tx_itr = pf->tx_itr;
837 error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
838 if ((error) || (req->newptr == NULL))
839 return (error);
840 if (pf->dynamic_tx_itr) {
841 device_printf(dev,
842 "Cannot set TX itr value while dynamic TX itr is enabled\n");
843 return (EINVAL);
844 }
845 if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
846 device_printf(dev,
847 "Invalid TX itr value; value must be between 0 and %d\n",
848 IXL_MAX_ITR);
849 return (EINVAL);
850 }
851
852 pf->tx_itr = requested_tx_itr;
853 ixl_configure_tx_itr(pf);
854
855 return (error);
856}
857
858/*
859 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
860 * Writes to the ITR registers immediately.
861 */
862static int
863ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
864{
865 struct ixl_pf *pf = (struct ixl_pf *)arg1;
866 device_t dev = pf->dev;
867 int error = 0;
868 int requested_rx_itr;
869
870 requested_rx_itr = pf->rx_itr;
871 error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
872 if ((error) || (req->newptr == NULL))
873 return (error);
874 if (pf->dynamic_rx_itr) {
875 device_printf(dev,
876 "Cannot set RX itr value while dynamic RX itr is enabled\n");
877 return (EINVAL);
878 }
879 if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
880 device_printf(dev,
881 "Invalid RX itr value; value must be between 0 and %d\n",
882 IXL_MAX_ITR);
883 return (EINVAL);
884 }
885
886 pf->rx_itr = requested_rx_itr;
887 ixl_configure_rx_itr(pf);
888
889 return (error);
890}
891
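/*
** Attach port-level MAC statistics (error counters, frame size
** histograms, and flow control counters) under a "mac" sysctl node.
*/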
892void
893ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
894 struct sysctl_oid_list *child,
895 struct i40e_hw_port_stats *stats)
896{
897 struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
898 "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Mac Statistics");
899 struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
900
901 struct i40e_eth_stats *eth_stats = &stats->eth;
902 ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
903
904 struct ixl_sysctl_info ctls[] =
905 {
906 {&stats->crc_errors, "crc_errors", "CRC Errors"},
907 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
908 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
909 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
910 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
911 /* Packet Reception Stats */
912 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
913 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
914 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
915 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
916 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
917 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
918 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
919 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
920 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
921 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
922 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
923 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
924 /* Packet Transmission Stats */
925 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
926 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
927 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
928 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
929 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
930 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
931 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
932 /* Flow control */
933 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
934 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
935 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
936 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
937 /* End */
938 {0,0,0}
939 };
940
941 struct ixl_sysctl_info *entry = ctls;
942 while (entry->stat != 0)
943 {
944 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
945 CTLFLAG_RD, entry->stat,
946 entry->description);
947 entry++;
948 }
949}
950
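/*
** Program the RSS hash key, via the admin queue on X722 devices or
** directly through the PFQF_HKEY registers otherwise.
*/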
951void
952ixl_set_rss_key(struct ixl_pf *pf)
953{
954 struct i40e_hw *hw = &pf->hw;
955 struct ixl_vsi *vsi = &pf->vsi;
956 device_t dev = pf->dev;
957 u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
958 enum i40e_status_code status;
959
960#ifdef RSS
961 /* Fetch the configured RSS key */
962 rss_getkey((uint8_t *) &rss_seed);
963#else
964 ixl_get_default_rss_key(rss_seed);
965#endif
966 /* Fill out hash function seed */
967 if (hw->mac.type == I40E_MAC_X722) {
968 struct i40e_aqc_get_set_rss_key_data key_data;
969 bcopy(rss_seed, &key_data, 52);
970 status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
971 if (status)
972 device_printf(dev,
973 "i40e_aq_set_rss_key status %s, error %s\n",
974 i40e_stat_str(hw, status),
975 i40e_aq_str(hw, hw->aq.asq_last_status));
976 } else {
977 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
978 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
979 }
980}
981
982/*
983 * Configure enabled PCTYPES for RSS.
984 */
985void
986ixl_set_rss_pctypes(struct ixl_pf *pf)
987{
988 struct i40e_hw *hw = &pf->hw;
989 u64 set_hena = 0, hena;
990
991#ifdef RSS
992 u32 rss_hash_config;
993
994 rss_hash_config = rss_gethashconfig();
995 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
996 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
997 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
998 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
999 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1000 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
1001 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1002 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1003 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1004 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1005 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1006 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1007 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1008 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
1009#else
1010 if (hw->mac.type == I40E_MAC_X722)
1011 set_hena = IXL_DEFAULT_RSS_HENA_X722;
1012 else
1013 set_hena = IXL_DEFAULT_RSS_HENA_XL710;
1014#endif
1015 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
1016 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
1017 hena |= set_hena;
1018 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
1019 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
1020
1021}
1022
1023/*
1024** Setup the PF's RSS parameters.
1025*/
1026void
1027ixl_config_rss(struct ixl_pf *pf)
1028{
1029 ixl_set_rss_key(pf);
1030 ixl_set_rss_pctypes(pf);
1031 ixl_set_rss_hlut(pf);
1032}
1033
1034/*
1035 * In some firmware versions there is a default MAC/VLAN filter
1036 * configured which interferes with the filters managed by the driver.
1037 * Make sure it's removed.
1038 */
1039void
1040ixl_del_default_hw_filters(struct ixl_vsi *vsi)
1041{
1042 struct i40e_aqc_remove_macvlan_element_data e;
1043
1044 bzero(&e, sizeof(e));
1045 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1046 e.vlan_tag = 0;
1047 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1048 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1049
1050 bzero(&e, sizeof(e));
1051 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1052 e.vlan_tag = 0;
1053 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1054 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1055 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1056}
1057
1058/*
1059** Initialize filter list and add filters that the hardware
1060** needs to know about.
1061**
1062** Requires VSI's seid to be set before calling.
1063*/
1064void
1065ixl_init_filters(struct ixl_vsi *vsi)
1066{
1067 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1068
1069 ixl_dbg_filter(pf, "%s: start\n", __func__);
1070
1071 /* Initialize mac filter list for VSI */
1072 LIST_INIT(&vsi->ftl);
1073 vsi->num_hw_filters = 0;
1074
1075 /* Receive broadcast Ethernet frames */
1076 i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
1077
1078 if (IXL_VSI_IS_VF(vsi))
1079 return;
1080
1081 ixl_del_default_hw_filters(vsi);
1082
1083 ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
1084
1085 /*
1086 * Prevent Tx flow control frames from being sent out by
1087 * non-firmware transmitters.
1088 * This affects every VSI in the PF.
1089 */
1090#ifndef IXL_DEBUG_FC
1091 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1092#else
1093 if (pf->enable_tx_fc_filter)
1094 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1095#endif
1096}
1097
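/*
** Re-program all filters tracked in the VSI's SW list into HW and
** make sure the MAC and registered-VLAN filters are present.
*/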
1098void
1099ixl_reconfigure_filters(struct ixl_vsi *vsi)
1100{
1101 struct i40e_hw *hw = vsi->hw;
1102 struct ixl_ftl_head tmp;
1103 int cnt;
1104
1105 /*
1106 * The ixl_add_hw_filters function adds filters configured
1107 * in HW to a list in VSI. Move all filters to a temporary
1108 * list to avoid corrupting it by concatenating to itself.
1109 */
1110 LIST_INIT(&tmp);
1111 LIST_CONCAT(&tmp, &vsi->ftl, ixl_mac_filter, ftle);
1112 cnt = vsi->num_hw_filters;
1113 vsi->num_hw_filters = 0;
1114
1115 ixl_add_hw_filters(vsi, &tmp, cnt);
1116
1117 /*
1118 * When the vsi is allocated for the VFs, both vsi->hw and vsi->ifp
1119	 * will be NULL. Furthermore, the ftl of such a vsi already contains
1120	 * the IXL_VLAN_ANY filter, so we can skip that as well.
1121 */
1122 if (hw == NULL)
1123 return;
1124
1125 /* Filter could be removed if MAC address was changed */
1126 ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
1127
1128 if ((if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWFILTER) == 0)
1129 return;
1130 /*
1131 * VLAN HW filtering is enabled, make sure that filters
1132 * for all registered VLAN tags are configured
1133 */
1134 ixl_add_vlan_filters(vsi, hw->mac.addr);
1135}
1136
1137/*
1138 * This routine adds a MAC/VLAN filter to the software filter
1139 * list, then adds that new filter to the HW if it doesn't already
1140 * exist in the SW filter list.
1141 */
1142void
1143ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1144{
1145 struct ixl_mac_filter *f, *tmp;
1146 struct ixl_pf *pf;
1147 device_t dev;
1148 struct ixl_ftl_head to_add;
1149 int to_add_cnt;
1150
1151 pf = vsi->back;
1152 dev = pf->dev;
1153 to_add_cnt = 1;
1154
1155 ixl_dbg_filter(pf, "ixl_add_filter: " MAC_FORMAT ", vlan %4d\n",
1156 MAC_FORMAT_ARGS(macaddr), vlan);
1157
1158 /* Does one already exist */
1159 f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
1160 if (f != NULL)
1161 return;
1162
1163 LIST_INIT(&to_add);
1164 f = ixl_new_filter(&to_add, macaddr, vlan);
1165 if (f == NULL) {
1166 device_printf(dev, "WARNING: no filter available!!\n");
1167 return;
1168 }
1169 if (f->vlan != IXL_VLAN_ANY)
1170 f->flags |= IXL_FILTER_VLAN;
1171 else
1172 vsi->num_macs++;
1173
1174 /*
1175	** If this is the first vlan being registered, we
1176	** need to remove the ANY filter that indicates we are
1177	** not in a vlan and replace it with a vlan-0 filter.
1178 */
1179 if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
1180 tmp = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
1181 if (tmp != NULL) {
1182 struct ixl_ftl_head to_del;
1183
1184 /* Prepare new filter first to avoid removing
1185 * VLAN_ANY filter if allocation fails */
1186 f = ixl_new_filter(&to_add, macaddr, 0);
1187 if (f == NULL) {
1188 device_printf(dev, "WARNING: no filter available!!\n");
1189 free(LIST_FIRST(&to_add), M_IXL);
1190 return;
1191 }
1192 to_add_cnt++;
1193
1194 LIST_REMOVE(tmp, ftle);
1195 LIST_INIT(&to_del);
1196 LIST_INSERT_HEAD(&to_del, tmp, ftle);
1197 ixl_del_hw_filters(vsi, &to_del, 1);
1198 }
1199 }
1200
1201 ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
1202}
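
/*
 * Usage sketch: the PF registers its own unicast address with
 * IXL_VLAN_ANY so it matches frames regardless of VLAN tag, and the
 * same MAC/VLAN tuple is later handed to ixl_del_filter() to undo it.
 * The wrapper below is hypothetical; it mirrors the call made from
 * ixl_init_filters() above.
 */
#if 0	/* illustration only */
static void
primary_mac_example(struct ixl_vsi *vsi, bool add)
{
	if (add)
		ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
	else
		ixl_del_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
}
#endif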
1203
1204/**
1205 * ixl_add_vlan_filters - Add MAC/VLAN filters for all registered VLANs
1206 * @vsi: pointer to VSI
1207 * @macaddr: MAC address
1208 *
1209 * Adds a MAC/VLAN filter for each VLAN configured on the interface,
1210 * provided there are enough HW filters. Otherwise adds a single filter
1211 * for all tagged and untagged frames to allow all configured VLANs
1212 * to receive traffic.
1213 */
1214void
1215ixl_add_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
1216{
1217 struct ixl_ftl_head to_add;
1218 struct ixl_mac_filter *f;
1219 int to_add_cnt = 0;
1220 int i, vlan = 0;
1221
1222 if (vsi->num_vlans == 0 || vsi->num_vlans > IXL_MAX_VLAN_FILTERS) {
1223 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
1224 return;
1225 }
1226 LIST_INIT(&to_add);
1227
1228 /* Add filter for untagged frames if it does not exist yet */
1229 f = ixl_find_filter(&vsi->ftl, macaddr, 0);
1230 if (f == NULL) {
1231 f = ixl_new_filter(&to_add, macaddr, 0);
1232 if (f == NULL) {
1233 device_printf(vsi->dev, "WARNING: no filter available!!\n");
1234 return;
1235 }
1236 to_add_cnt++;
1237 }
1238
1239 for (i = 1; i < EVL_VLID_MASK; i = vlan + 1) {
1240 bit_ffs_at(vsi->vlans_map, i, IXL_VLANS_MAP_LEN, &vlan);
1241 if (vlan == -1)
1242 break;
1243
1244 /* Does one already exist */
1245 f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
1246 if (f != NULL)
1247 continue;
1248
1249 f = ixl_new_filter(&to_add, macaddr, vlan);
1250 if (f == NULL) {
1251 device_printf(vsi->dev, "WARNING: no filter available!!\n");
1252 ixl_free_filters(&to_add);
1253 return;
1254 }
1255 to_add_cnt++;
1256 }
1257
1258 ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
1259}
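
/*
 * The loop above walks vsi->vlans_map with bit_ffs_at(3), which stores
 * the index of the first set bit at or after 'start' into its result
 * pointer, or -1 when none remain. A self-contained sketch of the same
 * iteration pattern over a generic bitstring:
 */
#if 0	/* illustration only */
#include <sys/bitstring.h>

static void
walk_set_bits_example(bitstr_t *map, int nbits)
{
	int b = 0;

	for (int i = 0; i < nbits; i = b + 1) {
		bit_ffs_at(map, i, nbits, &b);
		if (b == -1)
			break;
		/* bit 'b' is set; handle it here */
	}
}
#endif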
1260
1261void
1262ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1263{
1264 struct ixl_mac_filter *f, *tmp;
1265 struct ixl_ftl_head ftl_head;
1266 int to_del_cnt = 1;
1267
1268 ixl_dbg_filter((struct ixl_pf *)vsi->back,
1269 "ixl_del_filter: " MAC_FORMAT ", vlan %4d\n",
1270 MAC_FORMAT_ARGS(macaddr), vlan);
1271
1272 f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
1273 if (f == NULL)
1274 return;
1275
1276 LIST_REMOVE(f, ftle);
1277 LIST_INIT(&ftl_head);
1278 LIST_INSERT_HEAD(&ftl_head, f, ftle);
1279 if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
1280 vsi->num_macs--;
1281
1282	/* If this is not the last vlan, just remove the filter */
1283 if (vlan == IXL_VLAN_ANY || vsi->num_vlans > 0) {
1284 ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);
1285 return;
1286 }
1287
1288 /* It's the last vlan, we need to switch back to a non-vlan filter */
1289 tmp = ixl_find_filter(&vsi->ftl, macaddr, 0);
1290 if (tmp != NULL) {
1291 LIST_REMOVE(tmp, ftle);
1292 LIST_INSERT_AFTER(f, tmp, ftle);
1293 to_del_cnt++;
1294 }
1295 ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);
1296
1297 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
1298}
1299
1300/**
1301 * ixl_del_all_vlan_filters - Delete all VLAN filters with given MAC
1302 * @vsi: VSI which filters need to be removed
1303 * @macaddr: MAC address
1304 *
1305 * Remove all MAC/VLAN filters with a given MAC address. For multicast
1306 * addresses there is always a single filter for all VLANs used (IXL_VLAN_ANY),
1307 * so skip them to speed up processing. Those filters should be removed
1308 * using the ixl_del_filter function.
1309 */
1310void
1311ixl_del_all_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
1312{
1313 struct ixl_mac_filter *f, *tmp;
1314 struct ixl_ftl_head to_del;
1315 int to_del_cnt = 0;
1316
1317 LIST_INIT(&to_del);
1318
1319 LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, tmp) {
1320 if ((f->flags & IXL_FILTER_MC) != 0 ||
1321 !ixl_ether_is_equal(f->macaddr, macaddr))
1322 continue;
1323
1324 LIST_REMOVE(f, ftle);
1325 LIST_INSERT_HEAD(&to_del, f, ftle);
1326 to_del_cnt++;
1327 }
1328
1329 ixl_dbg_filter((struct ixl_pf *)vsi->back,
1330 "%s: " MAC_FORMAT ", to_del_cnt: %d\n",
1331 __func__, MAC_FORMAT_ARGS(macaddr), to_del_cnt);
1332 if (to_del_cnt > 0)
1333 ixl_del_hw_filters(vsi, &to_del, to_del_cnt);
1334}
1335
1336/*
1337** Find the filter with both matching mac addr and vlan id
1338*/
1339struct ixl_mac_filter *
1340ixl_find_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
1341{
1342 struct ixl_mac_filter *f;
1343
1344 LIST_FOREACH(f, headp, ftle) {
1345 if (ixl_ether_is_equal(f->macaddr, macaddr) &&
1346 (f->vlan == vlan)) {
1347 return (f);
1348 }
1349 }
1350
1351 return (NULL);
1352}
1353
1354/*
1355** This routine takes additions to the vsi filter
1356** table and creates an Admin Queue call to create
1357** the filters in the hardware.
1358*/
1359void
1360ixl_add_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_add, int cnt)
1361{
1362 struct i40e_aqc_add_macvlan_element_data *a, *b;
1363 struct ixl_mac_filter *f, *fn;
1364 struct ixl_pf *pf;
1365 struct i40e_hw *hw;
1366 device_t dev;
1367 enum i40e_status_code status;
1368 int j = 0;
1369
1370 pf = vsi->back;
1371 dev = vsi->dev;
1372 hw = &pf->hw;
1373
1374 ixl_dbg_filter(pf, "ixl_add_hw_filters: cnt: %d\n", cnt);
1375
1376 if (cnt < 1) {
1377 ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
1378 return;
1379 }
1380
1381 a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
1382 M_IXL, M_NOWAIT | M_ZERO);
1383 if (a == NULL) {
1384 device_printf(dev, "add_hw_filters failed to get memory\n");
1385 return;
1386 }
1387
1388 LIST_FOREACH(f, to_add, ftle) {
1389 b = &a[j]; // a pox on fvl long names :)
1390 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
1391 if (f->vlan == IXL_VLAN_ANY) {
1392 b->vlan_tag = 0;
1393 b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
1394 } else {
1395 b->vlan_tag = f->vlan;
1396 b->flags = 0;
1397 }
1398 b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1399 ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
1400 MAC_FORMAT_ARGS(f->macaddr));
1401
1402 if (++j == cnt)
1403 break;
1404 }
1405 if (j != cnt) {
1406 /* Something went wrong */
1407 device_printf(dev,
1408		    "%s ERROR: list of filters too short; expected: %d, found: %d\n",
1409 __func__, cnt, j);
1410 ixl_free_filters(to_add);
1411 goto out_free;
1412 }
1413
1414 status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
1415 if (status == I40E_SUCCESS) {
1416 LIST_CONCAT(&vsi->ftl, to_add, ixl_mac_filter, ftle);
1417 vsi->num_hw_filters += j;
1418 goto out_free;
1419 }
1420
1421 device_printf(dev,
1422 "i40e_aq_add_macvlan status %s, error %s\n",
1423 i40e_stat_str(hw, status),
1424 i40e_aq_str(hw, hw->aq.asq_last_status));
1425 j = 0;
1426
1427 /* Verify which filters were actually configured in HW
1428 * and add them to the list */
1429 LIST_FOREACH_SAFE(f, to_add, ftle, fn) {
1430 LIST_REMOVE(f, ftle);
1431 if (a[j].match_method == I40E_AQC_MM_ERR_NO_RES) {
1432 ixl_dbg_filter(pf,
1433 "%s filter " MAC_FORMAT " VTAG: %d not added\n",
1434 __func__,
1435 MAC_FORMAT_ARGS(f->macaddr),
1436 f->vlan);
1437 free(f, M_IXL);
1438 } else {
1439 LIST_INSERT_HEAD(&vsi->ftl, f, ftle);
1440 vsi->num_hw_filters++;
1441 }
1442 j++;
1443 }
1444
1445out_free:
1446 free(a, M_IXL);
1447}
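
/*
 * The recovery path above relies on the AQ response array being
 * index-aligned with the submitted list: element j reports the fate of
 * the j-th filter. A hedged sketch of scanning such a response for
 * per-entry rejections (hypothetical helper, not part of this driver):
 */
#if 0	/* illustration only */
static int
count_rejected_example(struct i40e_aqc_add_macvlan_element_data *a, int n)
{
	int rejected = 0;

	for (int j = 0; j < n; j++)
		if (a[j].match_method == I40E_AQC_MM_ERR_NO_RES)
			rejected++;
	return (rejected);
}
#endif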
1448
1449/*
1450** This routine takes removals in the vsi filter
1451** table and creates an Admin Queue call to delete
1452** the filters in the hardware.
1453*/
1454void
1455ixl_del_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_del, int cnt)
1456{
1457 struct i40e_aqc_remove_macvlan_element_data *d, *e;
1458 struct ixl_pf *pf;
1459 struct i40e_hw *hw;
1460 device_t dev;
1461 struct ixl_mac_filter *f, *f_temp;
1462 enum i40e_status_code status;
1463 int j = 0;
1464
1465 pf = vsi->back;
1466 hw = &pf->hw;
1467 dev = vsi->dev;
1468
1469 ixl_dbg_filter(pf, "%s: start, cnt: %d\n", __func__, cnt);
1470
1471 d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
1472 M_IXL, M_NOWAIT | M_ZERO);
1473 if (d == NULL) {
1474 device_printf(dev, "%s: failed to get memory\n", __func__);
1475 return;
1476 }
1477
1478 LIST_FOREACH_SAFE(f, to_del, ftle, f_temp) {
1479 e = &d[j]; // a pox on fvl long names :)
1480 bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
1481 e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1482 if (f->vlan == IXL_VLAN_ANY) {
1483 e->vlan_tag = 0;
1484 e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1485 } else {
1486 e->vlan_tag = f->vlan;
1487 }
1488
1489 ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
1490 MAC_FORMAT_ARGS(f->macaddr));
1491
1492 /* delete entry from the list */
1493 LIST_REMOVE(f, ftle);
1494 free(f, M_IXL);
1495 if (++j == cnt)
1496 break;
1497 }
1498 if (j != cnt || !LIST_EMPTY(to_del)) {
1499 /* Something went wrong */
1500 device_printf(dev,
1501		    "%s ERROR: filter list has wrong size; expected: %d, found: %d\n",
1502 __func__, cnt, j);
1503 ixl_free_filters(to_del);
1504 goto out_free;
1505 }
1506 status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
1507 if (status) {
1508 device_printf(dev,
1509 "%s: i40e_aq_remove_macvlan status %s, error %s\n",
1510 __func__, i40e_stat_str(hw, status),
1511 i40e_aq_str(hw, hw->aq.asq_last_status));
1512 for (int i = 0; i < j; i++) {
1513 if (d[i].error_code == 0)
1514 continue;
1515 device_printf(dev,
1516 "%s Filter does not exist " MAC_FORMAT " VTAG: %d\n",
1517 __func__, MAC_FORMAT_ARGS(d[i].mac_addr),
1518 d[i].vlan_tag);
1519 }
1520 }
1521
1522 vsi->num_hw_filters -= j;
1523
1524out_free:
1525 free(d, M_IXL);
1526
1527 ixl_dbg_filter(pf, "%s: end\n", __func__);
1528}
1529
1530int
1531ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1532{
1533 struct i40e_hw *hw = &pf->hw;
1534 int error = 0;
1535 u32 reg;
1536 u16 pf_qidx;
1537
1538 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1539
1540 ixl_dbg(pf, IXL_DBG_EN_DIS,
1541 "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
1542 pf_qidx, vsi_qidx);
1543
1544 i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
1545
1546 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1547 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
1548 I40E_QTX_ENA_QENA_STAT_MASK;
1549 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1550 /* Verify the enable took */
1551 for (int j = 0; j < 10; j++) {
1552 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1553 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
1554 break;
1555 i40e_usec_delay(10);
1556 }
1557 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
1558 device_printf(pf->dev, "TX queue %d still disabled!\n",
1559 pf_qidx);
1560 error = ETIMEDOUT;
1561 }
1562
1563 return (error);
1564}
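
/*
 * This enable path and the RX/disable variants below all share the
 * same write-then-poll shape. A hedged sketch of that shape factored
 * into a generic helper (hypothetical; the driver keeps the loops
 * open-coded):
 */
#if 0	/* illustration only */
static int
poll_reg_mask_example(struct i40e_hw *hw, u32 reg_addr, u32 mask,
    bool want_set, int tries)
{
	u32 v;

	for (int i = 0; i < tries; i++) {
		v = rd32(hw, reg_addr);
		if (((v & mask) != 0) == want_set)
			return (0);
		i40e_usec_delay(10);
	}
	return (ETIMEDOUT);
}
#endif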
1565
1566int
1567ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1568{
1569 struct i40e_hw *hw = &pf->hw;
1570 int error = 0;
1571 u32 reg;
1572 u16 pf_qidx;
1573
1574 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1575
1576 ixl_dbg(pf, IXL_DBG_EN_DIS,
1577 "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
1578 pf_qidx, vsi_qidx);
1579
1580 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1581 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
1582 I40E_QRX_ENA_QENA_STAT_MASK;
1583 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1584 /* Verify the enable took */
1585 for (int j = 0; j < 10; j++) {
1586 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1587 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
1588 break;
1589 i40e_usec_delay(10);
1590 }
1591 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
1592 device_printf(pf->dev, "RX queue %d still disabled!\n",
1593 pf_qidx);
1594 error = ETIMEDOUT;
1595 }
1596
1597 return (error);
1598}
1599
1600int
1601ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1602{
1603 int error = 0;
1604
1605 error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
1606 /* Called function already prints error message */
1607 if (error)
1608 return (error);
1609 error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
1610 return (error);
1611}
1612
1613/*
1614 * Returns error on first ring that is detected hung.
1615 */
1616int
1617ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1618{
1619 struct i40e_hw *hw = &pf->hw;
1620 int error = 0;
1621 u32 reg;
1622 u16 pf_qidx;
1623
1624 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1625
1626 ixl_dbg(pf, IXL_DBG_EN_DIS,
1627 "Disabling PF TX ring %4d / VSI TX ring %4d...\n",
1628 pf_qidx, vsi_qidx);
1629
1630 i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
1631 i40e_usec_delay(500);
1632
1633 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1634 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
1635 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1636 /* Verify the disable took */
1637 for (int j = 0; j < 10; j++) {
1638 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1639 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
1640 break;
1641 i40e_msec_delay(10);
1642 }
1643 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
1644 device_printf(pf->dev, "TX queue %d still enabled!\n",
1645 pf_qidx);
1646 error = ETIMEDOUT;
1647 }
1648
1649 return (error);
1650}
1651
1652/*
1653 * Returns error on first ring that is detected hung.
1654 */
1655int
1656ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1657{
1658 struct i40e_hw *hw = &pf->hw;
1659 int error = 0;
1660 u32 reg;
1661 u16 pf_qidx;
1662
1663 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1664
1665 ixl_dbg(pf, IXL_DBG_EN_DIS,
1666 "Disabling PF RX ring %4d / VSI RX ring %4d...\n",
1667 pf_qidx, vsi_qidx);
1668
1669 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1670 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
1671 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1672 /* Verify the disable took */
1673 for (int j = 0; j < 10; j++) {
1674 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1675 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
1676 break;
1677 i40e_msec_delay(10);
1678 }
1679 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
1680 device_printf(pf->dev, "RX queue %d still enabled!\n",
1681 pf_qidx);
1682 error = ETIMEDOUT;
1683 }
1684
1685 return (error);
1686}
1687
1688int
1689ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1690{
1691 int error = 0;
1692
1693 error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
1694 /* Called function already prints error message */
1695 if (error)
1696 return (error);
1697 error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
1698 return (error);
1699}
1700
1701static void
1702ixl_handle_tx_mdd_event(struct ixl_pf *pf)
1703{
1704 struct i40e_hw *hw = &pf->hw;
1705 device_t dev = pf->dev;
1706 struct ixl_vf *vf;
1707 bool mdd_detected = false;
1708 bool pf_mdd_detected = false;
1709 bool vf_mdd_detected = false;
1710 u16 vf_num, queue;
1711 u8 pf_num, event;
1712 u8 pf_mdet_num, vp_mdet_num;
1713 u32 reg;
1714
1715 /* find what triggered the MDD event */
1716 reg = rd32(hw, I40E_GL_MDET_TX);
1717 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
1718 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
1719 I40E_GL_MDET_TX_PF_NUM_SHIFT;
1720 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
1721 I40E_GL_MDET_TX_VF_NUM_SHIFT;
1722 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
1723 I40E_GL_MDET_TX_EVENT_SHIFT;
1724 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
1725 I40E_GL_MDET_TX_QUEUE_SHIFT;
1726 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
1727 mdd_detected = true;
1728 }
1729
1730 if (!mdd_detected)
1731 return;
1732
1733 reg = rd32(hw, I40E_PF_MDET_TX);
1734 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
1735 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
1736 pf_mdet_num = hw->pf_id;
1737 pf_mdd_detected = true;
1738 }
1739
1740 /* Check if MDD was caused by a VF */
1741 for (int i = 0; i < pf->num_vfs; i++) {
1742 vf = &(pf->vfs[i]);
1743 reg = rd32(hw, I40E_VP_MDET_TX(i));
1744 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
1745 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
1746 vp_mdet_num = i;
1747 vf->num_mdd_events++;
1748 vf_mdd_detected = true;
1749 }
1750 }
1751
1752 /* Print out an error message */
1753 if (vf_mdd_detected && pf_mdd_detected)
1754 device_printf(dev,
1755 "Malicious Driver Detection event %d"
1756 " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
1757 event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
1758 else if (vf_mdd_detected && !pf_mdd_detected)
1759 device_printf(dev,
1760 "Malicious Driver Detection event %d"
1761 " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
1762 event, queue, pf_num, vf_num, vp_mdet_num);
1763 else if (!vf_mdd_detected && pf_mdd_detected)
1764 device_printf(dev,
1765 "Malicious Driver Detection event %d"
1766 " on TX queue %d, pf number %d (PF-%d)\n",
1767 event, queue, pf_num, pf_mdet_num);
1768 /* Theoretically shouldn't happen */
1769 else
1770 device_printf(dev,
1771 "TX Malicious Driver Detection event (unknown)\n");
1772}
1773
1774static void
1775ixl_handle_rx_mdd_event(struct ixl_pf *pf)
1776{
1777 struct i40e_hw *hw = &pf->hw;
1778 device_t dev = pf->dev;
1779 struct ixl_vf *vf;
1780 bool mdd_detected = false;
1781 bool pf_mdd_detected = false;
1782 bool vf_mdd_detected = false;
1783 u16 queue;
1784 u8 pf_num, event;
1785 u8 pf_mdet_num, vp_mdet_num;
1786 u32 reg;
1787
1788 /*
1789 * GL_MDET_RX doesn't contain VF number information, unlike
1790 * GL_MDET_TX.
1791 */
1792 reg = rd32(hw, I40E_GL_MDET_RX);
1793 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
1794 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
1795 I40E_GL_MDET_RX_FUNCTION_SHIFT;
1796 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
1797 I40E_GL_MDET_RX_EVENT_SHIFT;
1798 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
1799 I40E_GL_MDET_RX_QUEUE_SHIFT;
1800 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
1801 mdd_detected = true;
1802 }
1803
1804 if (!mdd_detected)
1805 return;
1806
1807 reg = rd32(hw, I40E_PF_MDET_RX);
1808 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
1809 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
1810 pf_mdet_num = hw->pf_id;
1811 pf_mdd_detected = true;
1812 }
1813
1814 /* Check if MDD was caused by a VF */
1815 for (int i = 0; i < pf->num_vfs; i++) {
1816 vf = &(pf->vfs[i]);
1817 reg = rd32(hw, I40E_VP_MDET_RX(i));
1818 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
1819 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
1820 vp_mdet_num = i;
1821 vf->num_mdd_events++;
1822 vf_mdd_detected = true;
1823 }
1824 }
1825
1826 /* Print out an error message */
1827 if (vf_mdd_detected && pf_mdd_detected)
1828 device_printf(dev,
1829 "Malicious Driver Detection event %d"
1830 " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
1831 event, queue, pf_num, pf_mdet_num, vp_mdet_num);
1832 else if (vf_mdd_detected && !pf_mdd_detected)
1833 device_printf(dev,
1834 "Malicious Driver Detection event %d"
1835 " on RX queue %d, pf number %d, (VF-%d)\n",
1836 event, queue, pf_num, vp_mdet_num);
1837 else if (!vf_mdd_detected && pf_mdd_detected)
1838 device_printf(dev,
1839 "Malicious Driver Detection event %d"
1840 " on RX queue %d, pf number %d (PF-%d)\n",
1841 event, queue, pf_num, pf_mdet_num);
1842 /* Theoretically shouldn't happen */
1843 else
1844 device_printf(dev,
1845 "RX Malicious Driver Detection event (unknown)\n");
1846}
1847
1848/**
1849 * ixl_handle_mdd_event
1850 *
1851 * Called from the interrupt handler to identify possibly malicious VFs
1852 * (but it also detects events from the PF).
1853 **/
1854void
1855ixl_handle_mdd_event(struct ixl_pf *pf)
1856{
1857 struct i40e_hw *hw = &pf->hw;
1858 u32 reg;
1859
1860 /*
1861 * Handle both TX/RX because it's possible they could
1862 * both trigger in the same interrupt.
1863 */
1864 ixl_handle_tx_mdd_event(pf);
1865 ixl_handle_rx_mdd_event(pf);
1866
1867 atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
1868
1869 /* re-enable mdd interrupt cause */
1870 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
1871 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
1872 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
1873 ixl_flush(hw);
1874}
1875
1876void
1877ixl_enable_intr0(struct i40e_hw *hw)
1878{
1879 u32 reg;
1880
1881 /* Use IXL_ITR_NONE so ITR isn't updated here */
1882 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
1883 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1884 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
1885 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1886}
1887
1888void
1889ixl_disable_intr0(struct i40e_hw *hw)
1890{
1891 u32 reg;
1892
1893 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
1894 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1895 ixl_flush(hw);
1896}
1897
1898void
1899ixl_enable_queue(struct i40e_hw *hw, int id)
1900{
1901 u32 reg;
1902
1903 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1904 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1905 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
1906 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
1907}
1908
1909void
1910ixl_disable_queue(struct i40e_hw *hw, int id)
1911{
1912 u32 reg;
1913
1914 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
1915 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
1916}
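
/*
 * Usage sketch: a queue vector is typically masked while its
 * descriptors are serviced and re-armed afterwards. The wrapper and
 * queue index below are hypothetical; they only illustrate how the
 * enable/disable pair is meant to be used.
 */
#if 0	/* illustration only */
static void
service_queue_example(struct i40e_hw *hw, int qidx)
{
	ixl_disable_queue(hw, qidx);	/* mask the vector */
	/* ... clean TX / refill RX for this queue ... */
	ixl_enable_queue(hw, qidx);	/* unmask and clear the PBA bit */
}
#endif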
1917
1918void
1919ixl_handle_empr_reset(struct ixl_pf *pf)
1920{
1921 struct ixl_vsi *vsi = &pf->vsi;
622 ixl_link_speed_string(hw->phy.link_info.link_speed),
623 req_fec_string, neg_fec_string,
624 (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
625 (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
626 hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
627 ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
628 ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
629 ixl_fc_string[1] : ixl_fc_string[0]);
630}
631
632/*
633 * Configure admin queue/misc interrupt cause registers in hardware.
634 */
635void
636ixl_configure_intr0_msix(struct ixl_pf *pf)
637{
638 struct i40e_hw *hw = &pf->hw;
639 u32 reg;
640
641 /* First set up the adminq - vector 0 */
642 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
643 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
644
645 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
646 I40E_PFINT_ICR0_ENA_GRST_MASK |
647 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
648 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
649 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
650 I40E_PFINT_ICR0_ENA_VFLR_MASK |
651 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
652 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
653 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
654
655 /*
656 * 0x7FF is the end of the queue list.
657 * This means we won't use MSI-X vector 0 for a queue interrupt
658 * in MSI-X mode.
659 */
660 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
661 /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
662 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
663
664 wr32(hw, I40E_PFINT_DYN_CTL0,
665 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
666 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
667
668 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
669}
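
/*
 * ITR registers take 2-usec units, hence the 0x3E (62) written above
 * for 124 usecs. A hedged helper making the conversion explicit
 * (hypothetical; the driver writes the raw value directly):
 */
#if 0	/* illustration only */
static inline u32
usecs_to_itr_example(u32 usecs)
{
	return (usecs / 2);	/* 124 -> 62 == 0x3E */
}
#endif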
670
671void
672ixl_add_ifmedia(struct ifmedia *media, u64 phy_types)
673{
674	/* Add media types supported by the PHY capability word */
675 if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
676 ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL);
677
678 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
679 ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL);
680 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
681 ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL);
682 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
683 ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL);
684
685 if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T))
686 ifmedia_add(media, IFM_ETHER | IFM_2500_T, 0, NULL);
687
688 if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T))
689 ifmedia_add(media, IFM_ETHER | IFM_5000_T, 0, NULL);
690
691 if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
692 phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
693 phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
694 ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
695
696 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
697 ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL);
698 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
699 ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL);
700 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
701 ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL);
702
703 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
704 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
705 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
706 phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
707 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
708 ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
709 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
710 ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
711 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
712 ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
713
714 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
715 ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL);
716
717 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
718 || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
719 ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
720 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
721 ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
722 if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
723 ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
724 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
725 ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
726 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
727 ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL);
728
729 if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
730 ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
731
732 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
733 ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
734 if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
735 ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
736
737 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
738 ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL);
739 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
740 ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL);
741 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
742 ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL);
743 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
744 ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL);
745 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
746 ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
747 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
748 ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
749}
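
/*
 * Usage sketch: populate the interface media list from the PHY
 * capability word and default to autoselect. The vsi->media and
 * hw->phy.phy_types accessors are assumptions about the attach path,
 * not a verbatim excerpt of it.
 */
#if 0	/* illustration only */
static void
setup_media_example(struct ixl_vsi *vsi, struct i40e_hw *hw)
{
	ixl_add_ifmedia(vsi->media, hw->phy.phy_types);
	ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);
}
#endif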
750
751/*********************************************************************
752 *
753 * Get Firmware Switch configuration
754 * - this will need to be more robust when more complex
755 * switch configurations are enabled.
756 *
757 **********************************************************************/
758int
759ixl_switch_config(struct ixl_pf *pf)
760{
761 struct i40e_hw *hw = &pf->hw;
762 struct ixl_vsi *vsi = &pf->vsi;
763 device_t dev = iflib_get_dev(vsi->ctx);
764 struct i40e_aqc_get_switch_config_resp *sw_config;
765 u8 aq_buf[I40E_AQ_LARGE_BUF];
766 int ret;
767 u16 next = 0;
768
769 memset(&aq_buf, 0, sizeof(aq_buf));
770 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
771 ret = i40e_aq_get_switch_config(hw, sw_config,
772 sizeof(aq_buf), &next, NULL);
773 if (ret) {
774 device_printf(dev, "aq_get_switch_config() failed, error %d,"
775 " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
776 return (ret);
777 }
778 if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
779 device_printf(dev,
780 "Switch config: header reported: %d in structure, %d total\n",
781 LE16_TO_CPU(sw_config->header.num_reported),
782 LE16_TO_CPU(sw_config->header.num_total));
783 for (int i = 0;
784 i < LE16_TO_CPU(sw_config->header.num_reported); i++) {
785 device_printf(dev,
786 "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
787 sw_config->element[i].element_type,
788 LE16_TO_CPU(sw_config->element[i].seid),
789 LE16_TO_CPU(sw_config->element[i].uplink_seid),
790 LE16_TO_CPU(sw_config->element[i].downlink_seid));
791 }
792 }
793 /* Simplified due to a single VSI */
794 vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid);
795 vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid);
796 vsi->seid = LE16_TO_CPU(sw_config->element[0].seid);
797 return (ret);
798}
799
800void
801ixl_vsi_add_sysctls(struct ixl_vsi * vsi, const char * sysctl_name, bool queues_sysctls)
802{
803 struct sysctl_oid *tree;
804 struct sysctl_oid_list *child;
805 struct sysctl_oid_list *vsi_list;
806
807 tree = device_get_sysctl_tree(vsi->dev);
808 child = SYSCTL_CHILDREN(tree);
809 vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
810 CTLFLAG_RD, NULL, "VSI Number");
811
812 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
813 ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);
814
815 /* Copy of netstat RX errors counter for validation purposes */
816 SYSCTL_ADD_UQUAD(&vsi->sysctl_ctx, vsi_list, OID_AUTO, "rx_errors",
817 CTLFLAG_RD, &vsi->ierrors,
818 "RX packet errors");
819
820 if (queues_sysctls)
821 ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx);
822}
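
/*
 * The resulting OIDs hang off the device's sysctl tree, so for a first
 * ixl instance the error counter added above would be read as
 * (hypothetical node names, depending on sysctl_name):
 *
 *	# sysctl dev.ixl.0.<sysctl_name>.rx_errors
 */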
823
824/*
825 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
826 * Writes to the ITR registers immediately.
827 */
828static int
829ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
830{
831 struct ixl_pf *pf = (struct ixl_pf *)arg1;
832 device_t dev = pf->dev;
833 int error = 0;
834 int requested_tx_itr;
835
836 requested_tx_itr = pf->tx_itr;
837 error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
838 if ((error) || (req->newptr == NULL))
839 return (error);
840 if (pf->dynamic_tx_itr) {
841 device_printf(dev,
842 "Cannot set TX itr value while dynamic TX itr is enabled\n");
843 return (EINVAL);
844 }
845 if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
846 device_printf(dev,
847 "Invalid TX itr value; value must be between 0 and %d\n",
848 IXL_MAX_ITR);
849 return (EINVAL);
850 }
851
852 pf->tx_itr = requested_tx_itr;
853 ixl_configure_tx_itr(pf);
854
855 return (error);
856}
857
858/*
859 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
860 * Writes to the ITR registers immediately.
861 */
862static int
863ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
864{
865 struct ixl_pf *pf = (struct ixl_pf *)arg1;
866 device_t dev = pf->dev;
867 int error = 0;
868 int requested_rx_itr;
869
870 requested_rx_itr = pf->rx_itr;
871 error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
872 if ((error) || (req->newptr == NULL))
873 return (error);
874 if (pf->dynamic_rx_itr) {
875 device_printf(dev,
876 "Cannot set RX itr value while dynamic RX itr is enabled\n");
877 return (EINVAL);
878 }
879 if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
880 device_printf(dev,
881 "Invalid RX itr value; value must be between 0 and %d\n",
882 IXL_MAX_ITR);
883 return (EINVAL);
884 }
885
886 pf->rx_itr = requested_rx_itr;
887 ixl_configure_rx_itr(pf);
888
889 return (error);
890}
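
/*
 * Both ITR handlers above follow the canonical sysctl(9) PROC pattern:
 * export the current value, let sysctl_handle_int() service the
 * request, and commit only on a validated write (req->newptr set).
 * A minimal hedged skeleton of that pattern:
 */
#if 0	/* illustration only */
static int
sysctl_int_example(SYSCTL_HANDLER_ARGS)
{
	int val = *(int *)arg1;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);		/* read access, or copyin failed */
	if (val < 0)
		return (EINVAL);	/* validate before committing */
	*(int *)arg1 = val;
	return (0);
}
#endif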
891
892void
893ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
894 struct sysctl_oid_list *child,
895 struct i40e_hw_port_stats *stats)
896{
897 struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
898 "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Mac Statistics");
899 struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
900
901 struct i40e_eth_stats *eth_stats = &stats->eth;
902 ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
903
904 struct ixl_sysctl_info ctls[] =
905 {
906 {&stats->crc_errors, "crc_errors", "CRC Errors"},
907 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
908 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
909 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
910 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
911 /* Packet Reception Stats */
912 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
913 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
914 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
915 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
916 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
917 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
918 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
919 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
920 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
921 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
922 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
923 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
924 /* Packet Transmission Stats */
925 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
926 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
927 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
928 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
929 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
930 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
931 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
932 /* Flow control */
933 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
934 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
935 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
936 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
937 /* End */
938 {0,0,0}
939 };
940
941 struct ixl_sysctl_info *entry = ctls;
942 while (entry->stat != 0)
943 {
944 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
945 CTLFLAG_RD, entry->stat,
946 entry->description);
947 entry++;
948 }
949}
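
/*
 * The ctls[] table above makes new counters cheap to expose: append a
 * row and the registration loop picks it up. A hedged example row
 * (assuming i40e_hw_port_stats carries the field on this HW rev):
 *
 *	{&stats->mac_short_packet_dropped, "mac_short_drops",
 *	    "MAC short packet discards"},
 */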
950
951void
952ixl_set_rss_key(struct ixl_pf *pf)
953{
954 struct i40e_hw *hw = &pf->hw;
955 struct ixl_vsi *vsi = &pf->vsi;
956 device_t dev = pf->dev;
957 u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
958 enum i40e_status_code status;
959
960#ifdef RSS
961 /* Fetch the configured RSS key */
962 rss_getkey((uint8_t *) &rss_seed);
963#else
964 ixl_get_default_rss_key(rss_seed);
965#endif
966 /* Fill out hash function seed */
967 if (hw->mac.type == I40E_MAC_X722) {
968 struct i40e_aqc_get_set_rss_key_data key_data;
969 bcopy(rss_seed, &key_data, 52);
970 status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
971 if (status)
972 device_printf(dev,
973 "i40e_aq_set_rss_key status %s, error %s\n",
974 i40e_stat_str(hw, status),
975 i40e_aq_str(hw, hw->aq.asq_last_status));
976 } else {
977 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
978 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
979 }
980}
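
/*
 * The magic 52 in the bcopy above is the X722 RSS key blob:
 * IXL_RSS_KEY_SIZE_REG 32-bit words (13, given the 52-byte copy). A
 * compile-time check of that relationship, using the kernel's static
 * assert (hedged sketch):
 */
#if 0	/* illustration only */
CTASSERT(IXL_RSS_KEY_SIZE_REG * sizeof(u32) == 52);
#endif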
981
982/*
983 * Configure enabled PCTYPES for RSS.
984 */
985void
986ixl_set_rss_pctypes(struct ixl_pf *pf)
987{
988 struct i40e_hw *hw = &pf->hw;
989 u64 set_hena = 0, hena;
990
991#ifdef RSS
992 u32 rss_hash_config;
993
994 rss_hash_config = rss_gethashconfig();
995 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
996 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
997 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
998 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
999 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1000 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
1001 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1002 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1003 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1004 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1005 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1006 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1007 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1008 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
1009#else
1010 if (hw->mac.type == I40E_MAC_X722)
1011 set_hena = IXL_DEFAULT_RSS_HENA_X722;
1012 else
1013 set_hena = IXL_DEFAULT_RSS_HENA_XL710;
1014#endif
1015 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
1016 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
1017 hena |= set_hena;
1018 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
1019 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
1020
1021}
1022
1837 else if (!vf_mdd_detected && pf_mdd_detected)
1838 device_printf(dev,
1839 "Malicious Driver Detection event %d"
1840 " on RX queue %d, pf number %d (PF-%d)\n",
1841 event, queue, pf_num, pf_mdet_num);
1842 /* Theoretically shouldn't happen */
1843 else
1844 device_printf(dev,
1845 "RX Malicious Driver Detection event (unknown)\n");
1846}
1847
1848/**
1849 * ixl_handle_mdd_event
1850 *
1851 * Called from the interrupt handler to identify possibly malicious VFs
1852 * (it also detects MDD events caused by the PF itself).
1853 **/
1854void
1855ixl_handle_mdd_event(struct ixl_pf *pf)
1856{
1857 struct i40e_hw *hw = &pf->hw;
1858 u32 reg;
1859
1860 /*
1861 * Handle both TX/RX because it's possible they could
1862 * both trigger in the same interrupt.
1863 */
1864 ixl_handle_tx_mdd_event(pf);
1865 ixl_handle_rx_mdd_event(pf);
1866
1867 atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
1868
1869 /* re-enable mdd interrupt cause */
1870 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
1871 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
1872 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
1873 ixl_flush(hw);
1874}
1875
1876void
1877ixl_enable_intr0(struct i40e_hw *hw)
1878{
1879 u32 reg;
1880
1881 /* Use IXL_ITR_NONE so ITR isn't updated here */
1882 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
1883 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1884 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
1885 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1886}
1887
1888void
1889ixl_disable_intr0(struct i40e_hw *hw)
1890{
1891 u32 reg;
1892
1893 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
1894 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1895 ixl_flush(hw);
1896}
1897
1898void
1899ixl_enable_queue(struct i40e_hw *hw, int id)
1900{
1901 u32 reg;
1902
1903 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1904 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1905 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
1906 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
1907}
1908
1909void
1910ixl_disable_queue(struct i40e_hw *hw, int id)
1911{
1912 u32 reg;
1913
1914 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
1915 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
1916}
1917
1918void
1919ixl_handle_empr_reset(struct ixl_pf *pf)
1920{
1921 struct ixl_vsi *vsi = &pf->vsi;
1922 bool is_up = !!(if_getdrvflags(vsi->ifp) & IFF_DRV_RUNNING);
1923
1924 ixl_prepare_for_reset(pf, is_up);
1925	/*
1926	 * i40e_pf_reset checks the type of reset and acts
1927	 * accordingly. If an EMP or Core reset was performed,
1928	 * a PF reset is not necessary and sometimes even
1929	 * fails.
1930	 */
1931 ixl_pf_reset(pf);
1932
1933 if (!IXL_PF_IN_RECOVERY_MODE(pf) &&
1934 ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) {
1935 atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
1936 device_printf(pf->dev,
1937 "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
1938 pf->link_up = FALSE;
1939 ixl_update_link_status(pf);
1940 }
1941
1942 ixl_rebuild_hw_structs_after_reset(pf, is_up);
1943
1944 atomic_clear_32(&pf->state, IXL_PF_STATE_RESETTING);
1945}
1946
1947void
1948ixl_update_stats_counters(struct ixl_pf *pf)
1949{
1950 struct i40e_hw *hw = &pf->hw;
1951 struct ixl_vsi *vsi = &pf->vsi;
1952 struct ixl_vf *vf;
1953 u64 prev_link_xoff_rx = pf->stats.link_xoff_rx;
1954
1955 struct i40e_hw_port_stats *nsd = &pf->stats;
1956 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
1957
1958 /* Update hw stats */
1959 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
1960 pf->stat_offsets_loaded,
1961 &osd->crc_errors, &nsd->crc_errors);
1962 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
1963 pf->stat_offsets_loaded,
1964 &osd->illegal_bytes, &nsd->illegal_bytes);
1965 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
1966 I40E_GLPRT_GORCL(hw->port),
1967 pf->stat_offsets_loaded,
1968 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
1969 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
1970 I40E_GLPRT_GOTCL(hw->port),
1971 pf->stat_offsets_loaded,
1972 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
1973 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
1974 pf->stat_offsets_loaded,
1975 &osd->eth.rx_discards,
1976 &nsd->eth.rx_discards);
1977 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
1978 I40E_GLPRT_UPRCL(hw->port),
1979 pf->stat_offsets_loaded,
1980 &osd->eth.rx_unicast,
1981 &nsd->eth.rx_unicast);
1982 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
1983 I40E_GLPRT_UPTCL(hw->port),
1984 pf->stat_offsets_loaded,
1985 &osd->eth.tx_unicast,
1986 &nsd->eth.tx_unicast);
1987 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
1988 I40E_GLPRT_MPRCL(hw->port),
1989 pf->stat_offsets_loaded,
1990 &osd->eth.rx_multicast,
1991 &nsd->eth.rx_multicast);
1992 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
1993 I40E_GLPRT_MPTCL(hw->port),
1994 pf->stat_offsets_loaded,
1995 &osd->eth.tx_multicast,
1996 &nsd->eth.tx_multicast);
1997 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
1998 I40E_GLPRT_BPRCL(hw->port),
1999 pf->stat_offsets_loaded,
2000 &osd->eth.rx_broadcast,
2001 &nsd->eth.rx_broadcast);
2002 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
2003 I40E_GLPRT_BPTCL(hw->port),
2004 pf->stat_offsets_loaded,
2005 &osd->eth.tx_broadcast,
2006 &nsd->eth.tx_broadcast);
2007
2008 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
2009 pf->stat_offsets_loaded,
2010 &osd->tx_dropped_link_down,
2011 &nsd->tx_dropped_link_down);
2012 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
2013 pf->stat_offsets_loaded,
2014 &osd->mac_local_faults,
2015 &nsd->mac_local_faults);
2016 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2017 pf->stat_offsets_loaded,
2018 &osd->mac_remote_faults,
2019 &nsd->mac_remote_faults);
2020 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2021 pf->stat_offsets_loaded,
2022 &osd->rx_length_errors,
2023 &nsd->rx_length_errors);
2024
2025 /* Flow control (LFC) stats */
2026 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2027 pf->stat_offsets_loaded,
2028 &osd->link_xon_rx, &nsd->link_xon_rx);
2029 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2030 pf->stat_offsets_loaded,
2031 &osd->link_xon_tx, &nsd->link_xon_tx);
2032 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2033 pf->stat_offsets_loaded,
2034 &osd->link_xoff_rx, &nsd->link_xoff_rx);
2035 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2036 pf->stat_offsets_loaded,
2037 &osd->link_xoff_tx, &nsd->link_xoff_tx);
2038
2039 /*
2040 * For watchdog management we need to know if we have been paused
2041 * during the last interval, so capture that here.
2042 */
2043 if (pf->stats.link_xoff_rx != prev_link_xoff_rx)
2044 vsi->shared->isc_pause_frames = 1;
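	/*
	 * Example: if link_xoff_rx was 12 at the previous update and now
	 * reads 15, XOFF frames arrived during the interval; flagging
	 * isc_pause_frames lets the TX watchdog discount the stall.
	 */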
2045
2046 /* Packet size stats rx */
2047 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
2048 I40E_GLPRT_PRC64L(hw->port),
2049 pf->stat_offsets_loaded,
2050 &osd->rx_size_64, &nsd->rx_size_64);
2051 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
2052 I40E_GLPRT_PRC127L(hw->port),
2053 pf->stat_offsets_loaded,
2054 &osd->rx_size_127, &nsd->rx_size_127);
2055 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
2056 I40E_GLPRT_PRC255L(hw->port),
2057 pf->stat_offsets_loaded,
2058 &osd->rx_size_255, &nsd->rx_size_255);
2059 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
2060 I40E_GLPRT_PRC511L(hw->port),
2061 pf->stat_offsets_loaded,
2062 &osd->rx_size_511, &nsd->rx_size_511);
2063 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
2064 I40E_GLPRT_PRC1023L(hw->port),
2065 pf->stat_offsets_loaded,
2066 &osd->rx_size_1023, &nsd->rx_size_1023);
2067 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
2068 I40E_GLPRT_PRC1522L(hw->port),
2069 pf->stat_offsets_loaded,
2070 &osd->rx_size_1522, &nsd->rx_size_1522);
2071 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
2072 I40E_GLPRT_PRC9522L(hw->port),
2073 pf->stat_offsets_loaded,
2074 &osd->rx_size_big, &nsd->rx_size_big);
2075
2076 /* Packet size stats tx */
2077 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
2078 I40E_GLPRT_PTC64L(hw->port),
2079 pf->stat_offsets_loaded,
2080 &osd->tx_size_64, &nsd->tx_size_64);
2081 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
2082 I40E_GLPRT_PTC127L(hw->port),
2083 pf->stat_offsets_loaded,
2084 &osd->tx_size_127, &nsd->tx_size_127);
2085 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
2086 I40E_GLPRT_PTC255L(hw->port),
2087 pf->stat_offsets_loaded,
2088 &osd->tx_size_255, &nsd->tx_size_255);
2089 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
2090 I40E_GLPRT_PTC511L(hw->port),
2091 pf->stat_offsets_loaded,
2092 &osd->tx_size_511, &nsd->tx_size_511);
2093 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
2094 I40E_GLPRT_PTC1023L(hw->port),
2095 pf->stat_offsets_loaded,
2096 &osd->tx_size_1023, &nsd->tx_size_1023);
2097 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
2098 I40E_GLPRT_PTC1522L(hw->port),
2099 pf->stat_offsets_loaded,
2100 &osd->tx_size_1522, &nsd->tx_size_1522);
2101 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
2102 I40E_GLPRT_PTC9522L(hw->port),
2103 pf->stat_offsets_loaded,
2104 &osd->tx_size_big, &nsd->tx_size_big);
2105
2106 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2107 pf->stat_offsets_loaded,
2108 &osd->rx_undersize, &nsd->rx_undersize);
2109 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2110 pf->stat_offsets_loaded,
2111 &osd->rx_fragments, &nsd->rx_fragments);
2112 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2113 pf->stat_offsets_loaded,
2114 &osd->rx_oversize, &nsd->rx_oversize);
2115 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2116 pf->stat_offsets_loaded,
2117 &osd->rx_jabber, &nsd->rx_jabber);
2118 /* EEE */
2119 i40e_get_phy_lpi_status(hw, nsd);
2120
2121 i40e_lpi_stat_update(hw, pf->stat_offsets_loaded,
2122 &osd->tx_lpi_count, &nsd->tx_lpi_count,
2123 &osd->rx_lpi_count, &nsd->rx_lpi_count);
2124
2125 pf->stat_offsets_loaded = true;
2126 /* End hw stats */
2127
2128 /* Update vsi stats */
2129 ixl_update_vsi_stats(vsi);
2130
2131 for (int i = 0; i < pf->num_vfs; i++) {
2132 vf = &pf->vfs[i];
2133 if (vf->vf_flags & VF_FLAG_ENABLED)
2134 ixl_update_eth_stats(&pf->vfs[i].vsi);
2135 }
2136}
2137
2138/**
2139 * Update VSI-specific ethernet statistics counters.
2140 **/
2141void
2142ixl_update_eth_stats(struct ixl_vsi *vsi)
2143{
2144 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2145 struct i40e_hw *hw = &pf->hw;
2146 struct i40e_eth_stats *es;
2147 struct i40e_eth_stats *oes;
2148 u16 stat_idx = vsi->info.stat_counter_idx;
2149
2150 es = &vsi->eth_stats;
2151 oes = &vsi->eth_stats_offsets;
2152
2153 /* Gather up the stats that the hw collects */
2154 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2155 vsi->stat_offsets_loaded,
2156 &oes->tx_errors, &es->tx_errors);
2157 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2158 vsi->stat_offsets_loaded,
2159 &oes->rx_discards, &es->rx_discards);
2160
2161 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
2162 I40E_GLV_GORCL(stat_idx),
2163 vsi->stat_offsets_loaded,
2164 &oes->rx_bytes, &es->rx_bytes);
2165 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
2166 I40E_GLV_UPRCL(stat_idx),
2167 vsi->stat_offsets_loaded,
2168 &oes->rx_unicast, &es->rx_unicast);
2169 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
2170 I40E_GLV_MPRCL(stat_idx),
2171 vsi->stat_offsets_loaded,
2172 &oes->rx_multicast, &es->rx_multicast);
2173 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
2174 I40E_GLV_BPRCL(stat_idx),
2175 vsi->stat_offsets_loaded,
2176 &oes->rx_broadcast, &es->rx_broadcast);
2177
2178 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
2179 I40E_GLV_GOTCL(stat_idx),
2180 vsi->stat_offsets_loaded,
2181 &oes->tx_bytes, &es->tx_bytes);
2182 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
2183 I40E_GLV_UPTCL(stat_idx),
2184 vsi->stat_offsets_loaded,
2185 &oes->tx_unicast, &es->tx_unicast);
2186 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
2187 I40E_GLV_MPTCL(stat_idx),
2188 vsi->stat_offsets_loaded,
2189 &oes->tx_multicast, &es->tx_multicast);
2190 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
2191 I40E_GLV_BPTCL(stat_idx),
2192 vsi->stat_offsets_loaded,
2193 &oes->tx_broadcast, &es->tx_broadcast);
2194 vsi->stat_offsets_loaded = true;
2195}
2196
2197void
2198ixl_update_vsi_stats(struct ixl_vsi *vsi)
2199{
2200 struct ixl_pf *pf;
2201 struct i40e_eth_stats *es;
2202 u64 tx_discards, csum_errs;
2203
2204 struct i40e_hw_port_stats *nsd;
2205
2206 pf = vsi->back;
2207 es = &vsi->eth_stats;
2208 nsd = &pf->stats;
2209
2210 ixl_update_eth_stats(vsi);
2211
2212 tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
2213
2214 csum_errs = 0;
2215 for (int i = 0; i < vsi->num_rx_queues; i++)
2216 csum_errs += vsi->rx_queues[i].rxr.csum_errs;
2217 nsd->checksum_error = csum_errs;
2218
2219 /* Update ifnet stats */
2220 IXL_SET_IPACKETS(vsi, es->rx_unicast +
2221 es->rx_multicast +
2222 es->rx_broadcast);
2223 IXL_SET_OPACKETS(vsi, es->tx_unicast +
2224 es->tx_multicast +
2225 es->tx_broadcast);
2226 IXL_SET_IBYTES(vsi, es->rx_bytes);
2227 IXL_SET_OBYTES(vsi, es->tx_bytes);
2228 IXL_SET_IMCASTS(vsi, es->rx_multicast);
2229 IXL_SET_OMCASTS(vsi, es->tx_multicast);
2230
2231 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
2232 nsd->checksum_error + nsd->rx_length_errors +
2233 nsd->rx_undersize + nsd->rx_fragments + nsd->rx_oversize +
2234 nsd->rx_jabber);
2235 IXL_SET_OERRORS(vsi, es->tx_errors);
2236 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
2237 IXL_SET_OQDROPS(vsi, tx_discards);
2238 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
2239 IXL_SET_COLLISIONS(vsi, 0);
2240}
2241
2242/**
2243 * Reset all of the stats for the given pf
2244 **/
2245void
2246ixl_pf_reset_stats(struct ixl_pf *pf)
2247{
2248 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
2249 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
2250 pf->stat_offsets_loaded = false;
2251}
2252
2253/**
2254 * Resets all stats of the given vsi
2255 **/
2256void
2257ixl_vsi_reset_stats(struct ixl_vsi *vsi)
2258{
2259 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
2260 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
2261 vsi->stat_offsets_loaded = false;
2262}
2263
2264/**
2265 * Read and update a 48 bit stat from the hw
2266 *
2267 * Since the device stats are not reset at PFReset, they likely will not
2268 * be zeroed when the driver starts. We'll save the first values read
2269 * and use them as offsets to be subtracted from the raw values in order
2270 * to report stats that count from zero.
2271 **/
2272void
2273ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
2274 bool offset_loaded, u64 *offset, u64 *stat)
2275{
2276 u64 new_data;
2277
2278#if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
2279 new_data = rd64(hw, loreg);
2280#else
2281 /*
2282 * Use two rd32's instead of one rd64; FreeBSD versions before
2283 * 10 don't support 64-bit bus reads/writes.
2284 */
2285 new_data = rd32(hw, loreg);
2286 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
2287#endif
2288
2289 if (!offset_loaded)
2290 *offset = new_data;
2291 if (new_data >= *offset)
2292 *stat = new_data - *offset;
2293 else
2294 *stat = (new_data + ((u64)1 << 48)) - *offset;
2295 *stat &= 0xFFFFFFFFFFFFULL;
2296}
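
/*
 * Worked example of the wrap-around handling above, with hypothetical
 * register values:
 *
 *   offset   = 0xFFFFFFFFF000  (first value read after driver load)
 *   new_data = 0x000000000100  (counter has since wrapped past 2^48)
 *
 * Since new_data < offset:
 *   stat = (new_data + (1ULL << 48)) - offset = 0x1100
 *
 * i.e. 0x1100 events occurred since load, and the final mask keeps the
 * result within the 48 bits the counter can represent.
 */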
2297
2298/**
2299 * Read and update a 32 bit stat from the hw
2300 **/
2301void
2302ixl_stat_update32(struct i40e_hw *hw, u32 reg,
2303 bool offset_loaded, u64 *offset, u64 *stat)
2304{
2305 u32 new_data;
2306
2307 new_data = rd32(hw, reg);
2308 if (!offset_loaded)
2309 *offset = new_data;
2310 if (new_data >= *offset)
2311 *stat = (u32)(new_data - *offset);
2312 else
2313 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
2314}
2315
2316/**
2317 * Add subset of device sysctls safe to use in recovery mode
2318 */
2319void
2320ixl_add_sysctls_recovery_mode(struct ixl_pf *pf)
2321{
2322 device_t dev = pf->dev;
2323
2324 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2325 struct sysctl_oid_list *ctx_list =
2326 SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2327
2328 struct sysctl_oid *debug_node;
2329 struct sysctl_oid_list *debug_list;
2330
2331 SYSCTL_ADD_PROC(ctx, ctx_list,
2332 OID_AUTO, "fw_version",
2333 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2334 ixl_sysctl_show_fw, "A", "Firmware version");
2335
2336 /* Add sysctls meant to print debug information, but don't list them
2337 * in "sysctl -a" output. */
2338 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2339 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2340 "Debug Sysctls");
2341 debug_list = SYSCTL_CHILDREN(debug_node);
2342
2343 SYSCTL_ADD_UINT(ctx, debug_list,
2344 OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2345 &pf->hw.debug_mask, 0, "Shared code debug message level");
2346
2347 SYSCTL_ADD_UINT(ctx, debug_list,
2348 OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2349 &pf->dbg_mask, 0, "Non-shared code debug message level");
2350
2351 SYSCTL_ADD_PROC(ctx, debug_list,
2352 OID_AUTO, "dump_debug_data",
2353 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2354 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2355
2356 SYSCTL_ADD_PROC(ctx, debug_list,
2357 OID_AUTO, "do_pf_reset",
2358 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2359 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2360
2361 SYSCTL_ADD_PROC(ctx, debug_list,
2362 OID_AUTO, "do_core_reset",
2363 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2364 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2365
2366 SYSCTL_ADD_PROC(ctx, debug_list,
2367 OID_AUTO, "do_global_reset",
2368 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2369 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2370
2371 SYSCTL_ADD_PROC(ctx, debug_list,
2372 OID_AUTO, "queue_interrupt_table",
2373 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2374 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2375}
2376
2377void
2378ixl_add_device_sysctls(struct ixl_pf *pf)
2379{
2380 device_t dev = pf->dev;
2381 struct i40e_hw *hw = &pf->hw;
2382
2383 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2384 struct sysctl_oid_list *ctx_list =
2385 SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2386
2387 struct sysctl_oid *debug_node;
2388 struct sysctl_oid_list *debug_list;
2389
2390 struct sysctl_oid *fec_node;
2391 struct sysctl_oid_list *fec_list;
2392 struct sysctl_oid *eee_node;
2393 struct sysctl_oid_list *eee_list;
2394
2395 /* Set up sysctls */
2396 SYSCTL_ADD_PROC(ctx, ctx_list,
2397 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2398 pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
2399
2400 SYSCTL_ADD_PROC(ctx, ctx_list,
2401 OID_AUTO, "advertise_speed",
2402 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2403 ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
2404
2405 SYSCTL_ADD_PROC(ctx, ctx_list,
2406 OID_AUTO, "supported_speeds",
2407 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2408 ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
2409
2410 SYSCTL_ADD_PROC(ctx, ctx_list,
2411 OID_AUTO, "current_speed",
2412 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2413 ixl_sysctl_current_speed, "A", "Current Port Speed");
2414
2415 SYSCTL_ADD_PROC(ctx, ctx_list,
2416 OID_AUTO, "fw_version",
2417 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2418 ixl_sysctl_show_fw, "A", "Firmware version");
2419
2420 SYSCTL_ADD_PROC(ctx, ctx_list,
2421 OID_AUTO, "unallocated_queues",
2422 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2423 ixl_sysctl_unallocated_queues, "I",
2424 "Queues not allocated to a PF or VF");
2425
2426 SYSCTL_ADD_PROC(ctx, ctx_list,
2427 OID_AUTO, "tx_itr",
2428 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2429 ixl_sysctl_pf_tx_itr, "I",
2430 "Immediately set TX ITR value for all queues");
2431
2432 SYSCTL_ADD_PROC(ctx, ctx_list,
2433 OID_AUTO, "rx_itr",
2434 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2435 ixl_sysctl_pf_rx_itr, "I",
2436 "Immediately set RX ITR value for all queues");
2437
2438 SYSCTL_ADD_INT(ctx, ctx_list,
2439 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
2440 &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
2441
2442 SYSCTL_ADD_INT(ctx, ctx_list,
2443 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
2444 &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
2445
2446 /* Add FEC sysctls for 25G adapters */
2447 if (i40e_is_25G_device(hw->device_id)) {
2448 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2449 OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2450 "FEC Sysctls");
2451 fec_list = SYSCTL_CHILDREN(fec_node);
2452
2453 SYSCTL_ADD_PROC(ctx, fec_list,
2454 OID_AUTO, "fc_ability",
2455 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2456 ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
2457
2458 SYSCTL_ADD_PROC(ctx, fec_list,
2459 OID_AUTO, "rs_ability",
2460 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2461 ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
2462
2463 SYSCTL_ADD_PROC(ctx, fec_list,
2464 OID_AUTO, "fc_requested",
2465 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2466 ixl_sysctl_fec_fc_request, "I",
2467 "FC FEC mode requested on link");
2468
2469 SYSCTL_ADD_PROC(ctx, fec_list,
2470 OID_AUTO, "rs_requested",
2471 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2472 ixl_sysctl_fec_rs_request, "I",
2473 "RS FEC mode requested on link");
2474
2475 SYSCTL_ADD_PROC(ctx, fec_list,
2476 OID_AUTO, "auto_fec_enabled",
2477 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2478 ixl_sysctl_fec_auto_enable, "I",
2479 "Let FW decide FEC ability/request modes");
2480 }
2481
2482 SYSCTL_ADD_PROC(ctx, ctx_list,
2483 OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2484 pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
2485
2486 eee_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2487 OID_AUTO, "eee", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2488 "Energy Efficient Ethernet (EEE) Sysctls");
2489 eee_list = SYSCTL_CHILDREN(eee_node);
2490
2491 SYSCTL_ADD_PROC(ctx, eee_list,
2492 OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
2493 pf, 0, ixl_sysctl_eee_enable, "I",
2494 "Enable Energy Efficient Ethernet (EEE)");
2495
2496 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status",
2497 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_status, 0,
2498 "TX LPI status");
2499
2500 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status",
2501 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_status, 0,
2502 "RX LPI status");
2503
2504 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count",
2505 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_count,
2506 "TX LPI count");
2507
2508 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count",
2509 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_count,
2510 "RX LPI count");
2511
2512 SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO,
2513 "link_active_on_if_down",
2514 CTLTYPE_INT | CTLFLAG_RWTUN,
2515 pf, 0, ixl_sysctl_set_link_active, "I",
2516 IXL_SYSCTL_HELP_SET_LINK_ACTIVE);
2517
2518 /* Add sysctls meant to print debug information, but don't list them
2519 * in "sysctl -a" output. */
2520 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2521 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2522 "Debug Sysctls");
2523 debug_list = SYSCTL_CHILDREN(debug_node);
2524
2525 SYSCTL_ADD_UINT(ctx, debug_list,
2526 OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2527 &pf->hw.debug_mask, 0, "Shared code debug message level");
2528
2529 SYSCTL_ADD_UINT(ctx, debug_list,
2530 OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2531 &pf->dbg_mask, 0, "Non-shared code debug message level");
2532
2533 SYSCTL_ADD_PROC(ctx, debug_list,
2534 OID_AUTO, "link_status",
2535 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2536 pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
2537
2538 SYSCTL_ADD_PROC(ctx, debug_list,
2539 OID_AUTO, "phy_abilities_init",
2540 CTLTYPE_STRING | CTLFLAG_RD,
2541 pf, 1, ixl_sysctl_phy_abilities, "A", "Initial PHY Abilities");
2542
2543 SYSCTL_ADD_PROC(ctx, debug_list,
2544 OID_AUTO, "phy_abilities",
2545 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2546 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
2547
2548 SYSCTL_ADD_PROC(ctx, debug_list,
2549 OID_AUTO, "filter_list",
2550 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2551 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
2552
2553 SYSCTL_ADD_PROC(ctx, debug_list,
2554 OID_AUTO, "hw_res_alloc",
2555 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2556 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
2557
2558 SYSCTL_ADD_PROC(ctx, debug_list,
2559 OID_AUTO, "switch_config",
2560 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2561 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
2562
2563 SYSCTL_ADD_PROC(ctx, debug_list,
2564 OID_AUTO, "switch_vlans",
2565 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2566 pf, 0, ixl_sysctl_switch_vlans, "I", "HW Switch VLAN Configuration");
2567
2568 SYSCTL_ADD_PROC(ctx, debug_list,
2569 OID_AUTO, "rss_key",
2570 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2571 pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
2572
2573 SYSCTL_ADD_PROC(ctx, debug_list,
2574 OID_AUTO, "rss_lut",
2575 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2576 pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
2577
2578 SYSCTL_ADD_PROC(ctx, debug_list,
2579 OID_AUTO, "rss_hena",
2580 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2581 pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
2582
2583 SYSCTL_ADD_PROC(ctx, debug_list,
2584 OID_AUTO, "disable_fw_link_management",
2585 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2586 pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
2587
2588 SYSCTL_ADD_PROC(ctx, debug_list,
2589 OID_AUTO, "dump_debug_data",
2590 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2591 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2592
2593 SYSCTL_ADD_PROC(ctx, debug_list,
2594 OID_AUTO, "do_pf_reset",
2595 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2596 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2597
2598 SYSCTL_ADD_PROC(ctx, debug_list,
2599 OID_AUTO, "do_core_reset",
2600 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2601 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2602
2603 SYSCTL_ADD_PROC(ctx, debug_list,
2604 OID_AUTO, "do_global_reset",
2605 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2606 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2607
2608 SYSCTL_ADD_PROC(ctx, debug_list,
2609 OID_AUTO, "queue_interrupt_table",
2610 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2611 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2612
2613 if (pf->has_i2c) {
2614 SYSCTL_ADD_PROC(ctx, debug_list,
2615 OID_AUTO, "read_i2c_byte",
2616 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2617 pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
2618
2619 SYSCTL_ADD_PROC(ctx, debug_list,
2620 OID_AUTO, "write_i2c_byte",
2621 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2622 pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
2623
2624 SYSCTL_ADD_PROC(ctx, debug_list,
2625 OID_AUTO, "read_i2c_diag_data",
2626 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2627 pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
2628 }
2629}
2630
2631/*
2632 * Primarily for finding out, at runtime, how many queues can be
2633 * assigned to VFs.
2634 */
2635static int
2636ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
2637{
2638 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2639 int queues;
2640
2641 queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
2642
2643 return sysctl_handle_int(oidp, NULL, queues, req);
2644}
2645
2646static const char *
2647ixl_link_speed_string(enum i40e_aq_link_speed link_speed)
2648{
2649 const char * link_speed_str[] = {
2650 "Unknown",
2651 "100 Mbps",
2652 "1 Gbps",
2653 "10 Gbps",
2654 "40 Gbps",
2655 "20 Gbps",
2656 "25 Gbps",
2657 "2.5 Gbps",
2658 "5 Gbps"
2659 };
2660 int index;
2661
2662 switch (link_speed) {
2663 case I40E_LINK_SPEED_100MB:
2664 index = 1;
2665 break;
2666 case I40E_LINK_SPEED_1GB:
2667 index = 2;
2668 break;
2669 case I40E_LINK_SPEED_10GB:
2670 index = 3;
2671 break;
2672 case I40E_LINK_SPEED_40GB:
2673 index = 4;
2674 break;
2675 case I40E_LINK_SPEED_20GB:
2676 index = 5;
2677 break;
2678 case I40E_LINK_SPEED_25GB:
2679 index = 6;
2680 break;
2681 case I40E_LINK_SPEED_2_5GB:
2682 index = 7;
2683 break;
2684 case I40E_LINK_SPEED_5GB:
2685 index = 8;
2686 break;
2687 case I40E_LINK_SPEED_UNKNOWN:
2688 default:
2689 index = 0;
2690 break;
2691 }
2692
2693 return (link_speed_str[index]);
2694}
2695
2696int
2697ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
2698{
2699 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2700 struct i40e_hw *hw = &pf->hw;
2701 int error = 0;
2702
2703 ixl_update_link_status(pf);
2704
2705 error = sysctl_handle_string(oidp,
2706 __DECONST(void *,
2707 ixl_link_speed_string(hw->phy.link_info.link_speed)),
2708 8, req);
2709
2710 return (error);
2711}
2712
2713/*
2714 * Converts an 8-bit link speed bitmap between the sysctl flag
2715 * encoding and the Admin Queue flag encoding.
2716 */
2717static u8
2718ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
2719{
2720#define SPEED_MAP_SIZE 8
2721 static u16 speedmap[SPEED_MAP_SIZE] = {
2722 (I40E_LINK_SPEED_100MB | (0x1 << 8)),
2723 (I40E_LINK_SPEED_1GB | (0x2 << 8)),
2724 (I40E_LINK_SPEED_10GB | (0x4 << 8)),
2725 (I40E_LINK_SPEED_20GB | (0x8 << 8)),
2726 (I40E_LINK_SPEED_25GB | (0x10 << 8)),
2727 (I40E_LINK_SPEED_40GB | (0x20 << 8)),
2728 (I40E_LINK_SPEED_2_5GB | (0x40 << 8)),
2729 (I40E_LINK_SPEED_5GB | (0x80 << 8)),
2730 };
2731 u8 retval = 0;
2732
2733 for (int i = 0; i < SPEED_MAP_SIZE; i++) {
2734 if (to_aq)
2735 retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
2736 else
2737 retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
2738 }
2739
2740 return (retval);
2741}
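
/*
 * Example of the mapping above, written symbolically because the
 * numeric values of the I40E_LINK_SPEED_* bits are defined in shared
 * code: with to_aq == true, sysctl flags 0x6 (1G | 10G) become
 * (I40E_LINK_SPEED_1GB | I40E_LINK_SPEED_10GB); with to_aq == false
 * the same AQ bits map back to 0x6. Each speedmap entry packs the AQ
 * flag in its low byte and the sysctl flag in its high byte, so one
 * table serves both directions.
 */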
2742
2743int
2744ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
2745{
2746 struct i40e_hw *hw = &pf->hw;
2747 device_t dev = pf->dev;
2748 struct i40e_aq_get_phy_abilities_resp abilities;
2749 struct i40e_aq_set_phy_config config;
2750 enum i40e_status_code aq_error = 0;
2751
2752 /* Get current capability information */
2753 aq_error = i40e_aq_get_phy_capabilities(hw,
2754 FALSE, FALSE, &abilities, NULL);
2755 if (aq_error) {
2756 device_printf(dev,
2757 "%s: Error getting phy capabilities %d,"
2758 " aq error: %d\n", __func__, aq_error,
2759 hw->aq.asq_last_status);
2760 return (EIO);
2761 }
2762
2763 /* Prepare new config */
2764 bzero(&config, sizeof(config));
2765 if (from_aq)
2766 config.link_speed = speeds;
2767 else
2768 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
2769 config.phy_type = abilities.phy_type;
2770 config.phy_type_ext = abilities.phy_type_ext;
2771 config.abilities = abilities.abilities
2772 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2773 config.eee_capability = abilities.eee_capability;
2774 config.eeer = abilities.eeer_val;
2775 config.low_power_ctrl = abilities.d3_lpan;
2776 config.fec_config = abilities.fec_cfg_curr_mod_ext_info
2777 & I40E_AQ_PHY_FEC_CONFIG_MASK;
2778
2779 /* Do aq command & restart link */
2780 aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
2781 if (aq_error) {
2782 device_printf(dev,
2783 "%s: Error setting new phy config %d,"
2784 " aq error: %d\n", __func__, aq_error,
2785 hw->aq.asq_last_status);
2786 return (EIO);
2787 }
2788
2789 return (0);
2790}
2791
2792/*
2793** Supported link speeds
2794** Flags:
2795** 0x1 - 100 Mb
2796** 0x2 - 1G
2797** 0x4 - 10G
2798** 0x8 - 20G
2799** 0x10 - 25G
2800** 0x20 - 40G
2801** 0x40 - 2.5G
2802** 0x80 - 5G
2803*/
2804static int
2805ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
2806{
2807 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2808 int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
2809
2810 return sysctl_handle_int(oidp, NULL, supported, req);
2811}
2812
2813/*
2814** Control link advertise speed:
2815** Flags:
2816** 0x1 - advertise 100 Mb
2817** 0x2 - advertise 1G
2818** 0x4 - advertise 10G
2819** 0x8 - advertise 20G
2820** 0x10 - advertise 25G
2821** 0x20 - advertise 40G
2822** 0x40 - advertise 2.5G
2823** 0x80 - advertise 5G
2824**
2825** Set to 0 to disable link
2826*/
2827int
2828ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
2829{
2830 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2831 device_t dev = pf->dev;
2832 u8 converted_speeds;
2833 int requested_ls = 0;
2834 int error = 0;
2835
2836 /* Read in new mode */
2837 requested_ls = pf->advertised_speed;
2838 error = sysctl_handle_int(oidp, &requested_ls, 0, req);
2839 if ((error) || (req->newptr == NULL))
2840 return (error);
2841 if (IXL_PF_IN_RECOVERY_MODE(pf)) {
2842 device_printf(dev, "Interface is currently in FW recovery mode. "
2843 "Setting advertise speed not supported\n");
2844 return (EINVAL);
2845 }
2846
2847 /* Error out if bits outside of possible flag range are set */
2848 if ((requested_ls & ~((u8)0xFF)) != 0) {
2849 device_printf(dev, "Input advertised speed out of range; "
2850 "valid flags are: 0x%02x\n",
2851 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
2852 return (EINVAL);
2853 }
2854
2855 /* Check if adapter supports input value */
2856 converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
2857 if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
2858 device_printf(dev, "Invalid advertised speed; "
2859 "valid flags are: 0x%02x\n",
2860 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
2861 return (EINVAL);
2862 }
2863
2864 error = ixl_set_advertised_speeds(pf, requested_ls, false);
2865 if (error)
2866 return (error);
2867
2868 pf->advertised_speed = requested_ls;
2869 ixl_update_link_status(pf);
2870 return (0);
2871}
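
/*
 * Illustrative usage from userland, assuming device unit 0:
 *
 *   sysctl dev.ixl.0.advertise_speed=0x4    # advertise only 10G
 *   sysctl dev.ixl.0.advertise_speed=0x6    # advertise 1G and 10G
 *   sysctl dev.ixl.0.advertise_speed=0      # disable link
 */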
2872
2873/*
2874 * Input: bitmap of enum i40e_aq_link_speed
2875 */
2876u64
2877ixl_max_aq_speed_to_value(u8 link_speeds)
2878{
2879 if (link_speeds & I40E_LINK_SPEED_40GB)
2880 return IF_Gbps(40);
2881 if (link_speeds & I40E_LINK_SPEED_25GB)
2882 return IF_Gbps(25);
2883 if (link_speeds & I40E_LINK_SPEED_20GB)
2884 return IF_Gbps(20);
2885 if (link_speeds & I40E_LINK_SPEED_10GB)
2886 return IF_Gbps(10);
2887 if (link_speeds & I40E_LINK_SPEED_5GB)
2888 return IF_Gbps(5);
2889 if (link_speeds & I40E_LINK_SPEED_2_5GB)
2890 return IF_Mbps(2500);
2891 if (link_speeds & I40E_LINK_SPEED_1GB)
2892 return IF_Gbps(1);
2893 if (link_speeds & I40E_LINK_SPEED_100MB)
2894 return IF_Mbps(100);
2895 else
2896 /* Minimum supported link speed */
2897 return IF_Mbps(100);
2898}
2899
2900/*
2901** Get the width and transaction speed of
2902** the bus this adapter is plugged into.
2903*/
2904void
2905ixl_get_bus_info(struct ixl_pf *pf)
2906{
2907 struct i40e_hw *hw = &pf->hw;
2908 device_t dev = pf->dev;
2909 u16 link;
2910 u32 offset, num_ports;
2911 u64 max_speed;
2912
2913 /* Some devices don't use PCIE */
2914 if (hw->mac.type == I40E_MAC_X722)
2915 return;
2916
2917 /* Read PCI Express Capabilities Link Status Register */
2918 pci_find_cap(dev, PCIY_EXPRESS, &offset);
2919 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
2920
2921 /* Fill out hw struct with PCIE info */
2922 i40e_set_pci_config_data(hw, link);
2923
2924 /* Use info to print out bandwidth messages */
2925 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
2926 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
2927 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
2928 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
2929 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
2930 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
2931 (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
2932 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
2933 ("Unknown"));
2934
2935 /*
2936 * If adapter is in slot with maximum supported speed,
2937 * no warning message needs to be printed out.
2938 */
2939 if (hw->bus.speed >= i40e_bus_speed_8000
2940 && hw->bus.width >= i40e_bus_width_pcie_x8)
2941 return;
2942
2943 num_ports = bitcount32(hw->func_caps.valid_functions);
2944 max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
2945
2946 if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
2947 device_printf(dev, "PCI-Express bandwidth available"
2948 " for this device may be insufficient for"
2949 " optimal performance.\n");
2950 device_printf(dev, "Please move the device to a different"
2951 " PCI-e link with more lanes and/or higher"
2952 " transfer rate.\n");
2953 }
2954}
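
/*
 * Worked example of the bandwidth heuristic above, assuming the
 * i40e_bus_speed enum values equal their MT/s rates (which the
 * arithmetic relies on): a 4-port adapter capable of 40G per port
 * gives 4 * 40000 = 160000, while a PCIe gen3 x8 slot gives
 * 8000 * 8 = 64000, so the insufficient-bandwidth warning is printed.
 * This is a rough heuristic, not an exact throughput calculation.
 */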
2955
2956static int
2957ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
2958{
2959 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2960 struct i40e_hw *hw = &pf->hw;
2961 struct sbuf *sbuf;
2962
2963 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
2964 ixl_nvm_version_str(hw, sbuf);
2965 sbuf_finish(sbuf);
2966 sbuf_delete(sbuf);
2967
2968 return (0);
2969}
2970
2971void
2972ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
2973{
2974 u8 nvma_ptr = nvma->config & 0xFF;
2975 u8 nvma_flags = (nvma->config & 0xF00) >> 8;
2976 const char * cmd_str;
2977
2978 switch (nvma->command) {
2979 case I40E_NVM_READ:
2980 if (nvma_ptr == 0xF && nvma_flags == 0xF &&
2981 nvma->offset == 0 && nvma->data_size == 1) {
2982 device_printf(dev, "NVMUPD: Get Driver Status Command\n");
2983 return;
2984 }
2985 cmd_str = "READ ";
2986 break;
2987 case I40E_NVM_WRITE:
2988 cmd_str = "WRITE";
2989 break;
2990 default:
2991 device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command);
2992 return;
2993 }
2994 device_printf(dev,
2995 "NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n",
2996 cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size);
2997}
2998
2999int
3000ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
3001{
3002 struct i40e_hw *hw = &pf->hw;
3003 struct i40e_nvm_access *nvma;
3004 device_t dev = pf->dev;
3005 enum i40e_status_code status = 0;
3006 size_t nvma_size, ifd_len, exp_len;
3007 int err, perrno;
3008
3009 DEBUGFUNC("ixl_handle_nvmupd_cmd");
3010
3011 /* Sanity checks */
3012 nvma_size = sizeof(struct i40e_nvm_access);
3013 ifd_len = ifd->ifd_len;
3014
3015 if (ifd_len < nvma_size ||
3016 ifd->ifd_data == NULL) {
3017 device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
3018 __func__);
3019 device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
3020 __func__, ifd_len, nvma_size);
3021 device_printf(dev, "%s: data pointer: %p\n", __func__,
3022 ifd->ifd_data);
3023 return (EINVAL);
3024 }
3025
3026 nvma = malloc(ifd_len, M_IXL, M_WAITOK);
3027 err = copyin(ifd->ifd_data, nvma, ifd_len);
3028 if (err) {
3029 device_printf(dev, "%s: Cannot get request from user space\n",
3030 __func__);
3031 free(nvma, M_IXL);
3032 return (err);
3033 }
3034
3035 if (pf->dbg_mask & IXL_DBG_NVMUPD)
3036 ixl_print_nvm_cmd(dev, nvma);
3037
3038 if (IXL_PF_IS_RESETTING(pf)) {
3039 int count = 0;
3040 while (count++ < 100) {
3041 i40e_msec_delay(100);
3042 if (!(IXL_PF_IS_RESETTING(pf)))
3043 break;
3044 }
3045 }
3046
3047 if (IXL_PF_IS_RESETTING(pf)) {
3048 device_printf(dev,
3049 "%s: timeout waiting for EMP reset to finish\n",
3050 __func__);
3051 free(nvma, M_IXL);
3052 return (-EBUSY);
3053 }
3054
3055 if (nvma->data_size < 1 || nvma->data_size > 4096) {
3056 device_printf(dev,
3057 "%s: invalid request, data size not in supported range\n",
3058 __func__);
3059 free(nvma, M_IXL);
3060 return (EINVAL);
3061 }
3062
3063 /*
3064 * Older versions of the NVM update tool don't set ifd_len to the size
3065 * of the entire buffer passed to the ioctl. Check the data_size field
3066 * in the contained i40e_nvm_access struct and ensure everything is
3067 * copied in from userspace.
3068 */
3069 exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */
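	/*
	 * Example with hypothetical sizes: if data_size is 4096 and the
	 * struct already accounts for 1 byte of data, exp_len is
	 * nvma_size + 4095; a tool that passed ifd_len == nvma_size
	 * triggers the full-length re-copy below.
	 */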
3070
3071 if (ifd_len < exp_len) {
3072 ifd_len = exp_len;
3073 nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK);
3074 err = copyin(ifd->ifd_data, nvma, ifd_len);
3075 if (err) {
3076 device_printf(dev, "%s: Cannot get request from user space\n",
3077 __func__);
3078 free(nvma, M_IXL);
3079 return (err);
3080 }
3081 }
3082
3083 // TODO: Might need a different lock here
3084 // IXL_PF_LOCK(pf);
3085 status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3086 // IXL_PF_UNLOCK(pf);
3087
3088 err = copyout(nvma, ifd->ifd_data, ifd_len);
3089 free(nvma, M_IXL);
3090 if (err) {
3091 device_printf(dev, "%s: Cannot return data to user space\n",
3092 __func__);
3093 return (err);
3094 }
3095
3096	/* Let nvmupdate report errors; log them here only when debug is enabled */
3097 if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
3098 device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3099 i40e_stat_str(hw, status), perrno);
3100
3101	/*
3102	 * -EPERM is actually ERESTART, which the kernel interprets as a
3103	 * request to re-run this ioctl. Return -EACCES instead of -EPERM.
3104	 */
3105 if (perrno == -EPERM)
3106 return (-EACCES);
3107 else
3108 return (perrno);
3109}
3110
3111int
3112ixl_find_i2c_interface(struct ixl_pf *pf)
3113{
3114 struct i40e_hw *hw = &pf->hw;
3115 bool i2c_en, port_matched;
3116 u32 reg;
3117
3118 for (int i = 0; i < 4; i++) {
3119 reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3120 i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3121 port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3122 >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3123 & BIT(hw->port);
3124 if (i2c_en && port_matched)
3125 return (i);
3126 }
3127
3128 return (-1);
3129}
3130
3131void
3132ixl_set_link(struct ixl_pf *pf, bool enable)
3133{
3134 struct i40e_hw *hw = &pf->hw;
3135 device_t dev = pf->dev;
3136 struct i40e_aq_get_phy_abilities_resp abilities;
3137 struct i40e_aq_set_phy_config config;
3138 enum i40e_status_code aq_error = 0;
3139 u32 phy_type, phy_type_ext;
3140
3141 /* Get initial capability information */
3142 aq_error = i40e_aq_get_phy_capabilities(hw,
3143 FALSE, TRUE, &abilities, NULL);
3144 if (aq_error) {
3145 device_printf(dev,
3146 "%s: Error getting phy capabilities %d,"
3147 " aq error: %d\n", __func__, aq_error,
3148 hw->aq.asq_last_status);
3149 return;
3150 }
3151
3152 phy_type = abilities.phy_type;
3153 phy_type_ext = abilities.phy_type_ext;
3154
3155 /* Get current capability information */
3156 aq_error = i40e_aq_get_phy_capabilities(hw,
3157 FALSE, FALSE, &abilities, NULL);
3158 if (aq_error) {
3159 device_printf(dev,
3160 "%s: Error getting phy capabilities %d,"
3161 " aq error: %d\n", __func__, aq_error,
3162 hw->aq.asq_last_status);
3163 return;
3164 }
3165
3166 /* Prepare new config */
3167 memset(&config, 0, sizeof(config));
3168 config.link_speed = abilities.link_speed;
3169 config.abilities = abilities.abilities;
3170 config.eee_capability = abilities.eee_capability;
3171 config.eeer = abilities.eeer_val;
3172 config.low_power_ctrl = abilities.d3_lpan;
3173 config.fec_config = abilities.fec_cfg_curr_mod_ext_info
3174 & I40E_AQ_PHY_FEC_CONFIG_MASK;
3175 config.phy_type = 0;
3176 config.phy_type_ext = 0;
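	/*
	 * With phy_type/phy_type_ext left at zero no PHY types are
	 * advertised, which forces the link down; when enabling, the
	 * initial types saved above are restored below.
	 */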
3177
3178 config.abilities &= ~(I40E_AQ_PHY_FLAG_PAUSE_TX |
3179 I40E_AQ_PHY_FLAG_PAUSE_RX);
3180
3181 switch (pf->fc) {
3182 case I40E_FC_FULL:
3183 config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX |
3184 I40E_AQ_PHY_FLAG_PAUSE_RX;
3185 break;
3186 case I40E_FC_RX_PAUSE:
3187 config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_RX;
3188 break;
3189 case I40E_FC_TX_PAUSE:
3190 config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX;
3191 break;
3192 default:
3193 break;
3194 }
3195
3196	if (enable) {
3197		config.phy_type = phy_type;
3198		config.phy_type_ext = phy_type_ext;
3200	}
3201
3202 aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
3203 if (aq_error) {
3204 device_printf(dev,
3205 "%s: Error setting new phy config %d,"
3206 " aq error: %d\n", __func__, aq_error,
3207 hw->aq.asq_last_status);
3208 return;
3209 }
3210
3211 aq_error = i40e_aq_set_link_restart_an(hw, enable, NULL);
3212 if (aq_error) {
3213 device_printf(dev,
3214 "%s: Error set link config %d,"
3215 " aq error: %d\n", __func__, aq_error,
3216 hw->aq.asq_last_status);
3217 return;
3218 }
3219}
3220
3221static char *
3222ixl_phy_type_string(u32 bit_pos, bool ext)
3223{
3224 static char * phy_types_str[32] = {
3225 "SGMII",
3226 "1000BASE-KX",
3227 "10GBASE-KX4",
3228 "10GBASE-KR",
3229 "40GBASE-KR4",
3230 "XAUI",
3231 "XFI",
3232 "SFI",
3233 "XLAUI",
3234 "XLPPI",
3235 "40GBASE-CR4",
3236 "10GBASE-CR1",
3237 "SFP+ Active DA",
3238 "QSFP+ Active DA",
3239 "Reserved (14)",
3240 "Reserved (15)",
3241 "Reserved (16)",
3242 "100BASE-TX",
3243 "1000BASE-T",
3244 "10GBASE-T",
3245 "10GBASE-SR",
3246 "10GBASE-LR",
3247 "10GBASE-SFP+Cu",
3248 "10GBASE-CR1",
3249 "40GBASE-CR4",
3250 "40GBASE-SR4",
3251 "40GBASE-LR4",
3252 "1000BASE-SX",
3253 "1000BASE-LX",
3254 "1000BASE-T Optical",
3255 "20GBASE-KR2",
3256 "Reserved (31)"
3257 };
3258 static char * ext_phy_types_str[8] = {
3259 "25GBASE-KR",
3260 "25GBASE-CR",
3261 "25GBASE-SR",
3262 "25GBASE-LR",
3263 "25GBASE-AOC",
3264 "25GBASE-ACC",
3265 "2.5GBASE-T",
3266 "5GBASE-T"
3267 };
3268
3269 if (ext && bit_pos > 7) return "Invalid_Ext";
3270 if (bit_pos > 31) return "Invalid";
3271
3272 return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3273}
3274
3275/* TODO: ERJ: I don't think this is necessary anymore. */
3276int
3277ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3278{
3279 device_t dev = pf->dev;
3280 struct i40e_hw *hw = &pf->hw;
3281 struct i40e_aq_desc desc;
3282 enum i40e_status_code status;
3283
3284 struct i40e_aqc_get_link_status *aq_link_status =
3285 (struct i40e_aqc_get_link_status *)&desc.params.raw;
3286
3287 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
3288 link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
3289 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3290 if (status) {
3291 device_printf(dev,
3292 "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3293 __func__, i40e_stat_str(hw, status),
3294 i40e_aq_str(hw, hw->aq.asq_last_status));
3295 return (EIO);
3296 }
3297
3298 bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
3299 return (0);
3300}
3301
3302static char *
3303ixl_phy_type_string_ls(u8 val)
3304{
3305 if (val >= 0x1F)
3306 return ixl_phy_type_string(val - 0x1F, true);
3307 else
3308 return ixl_phy_type_string(val, false);
3309}
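
/*
 * Example: a link-status phy_type of 0x21 is 0x1F + 2 and maps to
 * extended index 2 ("25GBASE-SR"), while 0x03 maps directly to base
 * index 3 ("10GBASE-KR").
 */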
3310
3311static int
3312ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3313{
3314 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3315 device_t dev = pf->dev;
3316 struct sbuf *buf;
3317 int error = 0;
3318
3319 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3320 if (!buf) {
3321 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3322 return (ENOMEM);
3323 }
3324
3325 struct i40e_aqc_get_link_status link_status;
3326 error = ixl_aq_get_link_status(pf, &link_status);
3327 if (error) {
3328 sbuf_delete(buf);
3329 return (error);
3330 }
3331
3332 sbuf_printf(buf, "\n"
3333 "PHY Type : 0x%02x<%s>\n"
3334 "Speed : 0x%02x\n"
3335 "Link info: 0x%02x\n"
3336 "AN info : 0x%02x\n"
3337 "Ext info : 0x%02x\n"
3338 "Loopback : 0x%02x\n"
3339 "Max Frame: %d\n"
3340 "Config : 0x%02x\n"
3341 "Power : 0x%02x",
3342 link_status.phy_type,
3343 ixl_phy_type_string_ls(link_status.phy_type),
3344 link_status.link_speed,
3345 link_status.link_info,
3346 link_status.an_info,
3347 link_status.ext_info,
3348 link_status.loopback,
3349 link_status.max_frame_size,
3350 link_status.config,
3351 link_status.power_desc);
3352
3353 error = sbuf_finish(buf);
3354 if (error)
3355 device_printf(dev, "Error finishing sbuf: %d\n", error);
3356
3357 sbuf_delete(buf);
3358 return (error);
3359}
3360
3361static int
3362ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3363{
3364 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3365 struct i40e_hw *hw = &pf->hw;
3366 device_t dev = pf->dev;
3367 enum i40e_status_code status;
3368 struct i40e_aq_get_phy_abilities_resp abilities;
3369 struct sbuf *buf;
3370 int error = 0;
3371
3372 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3373 if (!buf) {
3374 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3375 return (ENOMEM);
3376 }
3377
3378 status = i40e_aq_get_phy_capabilities(hw,
3379 FALSE, arg2 != 0, &abilities, NULL);
3380 if (status) {
3381 device_printf(dev,
3382 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3383 __func__, i40e_stat_str(hw, status),
3384 i40e_aq_str(hw, hw->aq.asq_last_status));
3385 sbuf_delete(buf);
3386 return (EIO);
3387 }
3388
3389 sbuf_printf(buf, "\n"
3390 "PHY Type : %08x",
3391 abilities.phy_type);
3392
3393 if (abilities.phy_type != 0) {
3394 sbuf_printf(buf, "<");
3395 for (int i = 0; i < 32; i++)
3396 if ((1 << i) & abilities.phy_type)
3397 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3398 sbuf_printf(buf, ">");
3399 }
3400
3401 sbuf_printf(buf, "\nPHY Ext : %02x",
3402 abilities.phy_type_ext);
3403
3404 if (abilities.phy_type_ext != 0) {
3405 sbuf_printf(buf, "<");
3406 for (int i = 0; i < 4; i++)
3407 if ((1 << i) & abilities.phy_type_ext)
3408 sbuf_printf(buf, "%s,",
3409 ixl_phy_type_string(i, true));
3410 sbuf_printf(buf, ">");
3411 }
3412
3413 sbuf_printf(buf, "\nSpeed : %02x", abilities.link_speed);
3414 if (abilities.link_speed != 0) {
3415 u8 link_speed;
3416 sbuf_printf(buf, " <");
3417 for (int i = 0; i < 8; i++) {
3418 link_speed = (1 << i) & abilities.link_speed;
3419 if (link_speed)
3420 sbuf_printf(buf, "%s, ",
3421 ixl_link_speed_string(link_speed));
3422 }
3423 sbuf_printf(buf, ">");
3424 }
3425
3426 sbuf_printf(buf, "\n"
3427 "Abilities: %02x\n"
3428 "EEE cap : %04x\n"
3429 "EEER reg : %08x\n"
3430 "D3 Lpan : %02x\n"
3431 "ID : %02x %02x %02x %02x\n"
3432 "ModType : %02x %02x %02x\n"
3433 "ModType E: %01x\n"
3434 "FEC Cfg : %02x\n"
3435 "Ext CC : %02x",
3436 abilities.abilities, abilities.eee_capability,
3437 abilities.eeer_val, abilities.d3_lpan,
3438 abilities.phy_id[0], abilities.phy_id[1],
3439 abilities.phy_id[2], abilities.phy_id[3],
3440 abilities.module_type[0], abilities.module_type[1],
3441 abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
3442 abilities.fec_cfg_curr_mod_ext_info & 0x1F,
3443 abilities.ext_comp_code);
3444
3445 error = sbuf_finish(buf);
3446 if (error)
3447 device_printf(dev, "Error finishing sbuf: %d\n", error);
3448
3449 sbuf_delete(buf);
3450 return (error);
3451}
3452
3453static int
3454ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
3455{
3456 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3457 struct ixl_vsi *vsi = &pf->vsi;
3458 struct ixl_mac_filter *f;
3459 device_t dev = pf->dev;
3460 int error = 0, ftl_len = 0, ftl_counter = 0;
3461
3462 struct sbuf *buf;
3463
3464 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3465 if (!buf) {
3466 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3467 return (ENOMEM);
3468 }
3469
3470 sbuf_printf(buf, "\n");
3471
3472 /* Print MAC filters */
3473 sbuf_printf(buf, "PF Filters:\n");
3474 LIST_FOREACH(f, &vsi->ftl, ftle)
3475 ftl_len++;
3476
3477 if (ftl_len < 1)
3478 sbuf_printf(buf, "(none)\n");
3479 else {
3480 LIST_FOREACH(f, &vsi->ftl, ftle) {
3481 sbuf_printf(buf,
3482 MAC_FORMAT ", vlan %4d, flags %#06x",
3483 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3484 /* don't print '\n' for last entry */
3485 if (++ftl_counter != ftl_len)
3486 sbuf_printf(buf, "\n");
3487 }
3488 }
3489
3490#ifdef PCI_IOV
3491 /* TODO: Give each VF its own filter list sysctl */
3492 struct ixl_vf *vf;
3493 if (pf->num_vfs > 0) {
3494 sbuf_printf(buf, "\n\n");
3495 for (int i = 0; i < pf->num_vfs; i++) {
3496 vf = &pf->vfs[i];
3497 if (!(vf->vf_flags & VF_FLAG_ENABLED))
3498 continue;
3499
3500 vsi = &vf->vsi;
3501 ftl_len = 0, ftl_counter = 0;
3502 sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
3503 LIST_FOREACH(f, &vsi->ftl, ftle)
3504 ftl_len++;
3505
3506 if (ftl_len < 1)
3507 sbuf_printf(buf, "(none)\n");
3508 else {
3509 LIST_FOREACH(f, &vsi->ftl, ftle) {
3510 sbuf_printf(buf,
3511 MAC_FORMAT ", vlan %4d, flags %#06x\n",
3512 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3513 }
3514 }
3515 }
3516 }
3517#endif
3518
3519 error = sbuf_finish(buf);
3520 if (error)
3521 device_printf(dev, "Error finishing sbuf: %d\n", error);
3522 sbuf_delete(buf);
3523
3524 return (error);
3525}
3526
3527#define IXL_SW_RES_SIZE 0x14
3528int
3529ixl_res_alloc_cmp(const void *a, const void *b)
3530{
3531 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
3532 one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
3533 two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
3534
3535 return ((int)one->resource_type - (int)two->resource_type);
3536}
3537
3538/*
3539 * Longest string length: 25
3540 */
3541const char *
3542ixl_switch_res_type_string(u8 type)
3543{
3544 static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = {
3545 "VEB",
3546 "VSI",
3547 "Perfect Match MAC address",
3548 "S-tag",
3549 "(Reserved)",
3550 "Multicast hash entry",
3551 "Unicast hash entry",
3552 "VLAN",
3553 "VSI List entry",
3554 "(Reserved)",
3555 "VLAN Statistic Pool",
3556 "Mirror Rule",
3557 "Queue Set",
3558 "Inner VLAN Forward filter",
3559 "(Reserved)",
3560 "Inner MAC",
3561 "IP",
3562 "GRE/VN1 Key",
3563 "VN2 Key",
3564 "Tunneling Port"
3565 };
3566
3567 if (type < IXL_SW_RES_SIZE)
3568 return ixl_switch_res_type_strings[type];
3569 else
3570 return "(Reserved)";
3571}
3572
3573static int
3574ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
3575{
3576 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3577 struct i40e_hw *hw = &pf->hw;
3578 device_t dev = pf->dev;
3579 struct sbuf *buf;
3580 enum i40e_status_code status;
3581 int error = 0;
3582
3583 u8 num_entries;
3584 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
3585
3586 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3587 if (!buf) {
3588 device_printf(dev, "Could not allocate sbuf for output.\n");
3589 return (ENOMEM);
3590 }
3591
3592 bzero(resp, sizeof(resp));
3593 status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
3594 resp,
3595 IXL_SW_RES_SIZE,
3596 NULL);
3597 if (status) {
3598 device_printf(dev,
3599 "%s: get_switch_resource_alloc() error %s, aq error %s\n",
3600 __func__, i40e_stat_str(hw, status),
3601 i40e_aq_str(hw, hw->aq.asq_last_status));
3602 sbuf_delete(buf);
3603 return (EIO);
3604 }
3605
3606 /* Sort entries by type for display */
3607 qsort(resp, num_entries,
3608 sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
3609 &ixl_res_alloc_cmp);
3610
3611 sbuf_cat(buf, "\n");
3612 sbuf_printf(buf, "# of entries: %d\n", num_entries);
3613 sbuf_printf(buf,
3614 " Type | Guaranteed | Total | Used | Un-allocated\n"
3615 " | (this) | (all) | (this) | (all) \n");
3616 for (int i = 0; i < num_entries; i++) {
3617 sbuf_printf(buf,
3618 "%25s | %10d %5d %6d %12d",
3619 ixl_switch_res_type_string(resp[i].resource_type),
3620 resp[i].guaranteed,
3621 resp[i].total,
3622 resp[i].used,
3623 resp[i].total_unalloced);
3624 if (i < num_entries - 1)
3625 sbuf_cat(buf, "\n");
3626 }
3627
3628 error = sbuf_finish(buf);
3629 if (error)
3630 device_printf(dev, "Error finishing sbuf: %d\n", error);
3631
3632 sbuf_delete(buf);
3633 return (error);
3634}
3635
3636enum ixl_sw_seid_offset {
3637 IXL_SW_SEID_EMP = 1,
3638 IXL_SW_SEID_MAC_START = 2,
3639 IXL_SW_SEID_MAC_END = 5,
3640 IXL_SW_SEID_PF_START = 16,
3641 IXL_SW_SEID_PF_END = 31,
3642 IXL_SW_SEID_VF_START = 32,
3643 IXL_SW_SEID_VF_END = 159,
3644};
3645
3646/*
3647 * Caller must init and delete the sbuf; this function will clear and
3648 * finish it for the caller.
3649 *
3650 * Note: The SEID argument only applies for elements defined by FW at
3651 * power-on; these include the EMP, Ports, PFs and VFs.
3652 */
3653static char *
3654ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid)
3655{
3656 sbuf_clear(s);
3657
3658 /* If SEID is in certain ranges, then we can infer the
3659 * mapping of SEID to switch element.
3660 */
3661 if (seid == IXL_SW_SEID_EMP) {
3662 sbuf_cat(s, "EMP");
3663 goto out;
3664 } else if (seid >= IXL_SW_SEID_MAC_START &&
3665 seid <= IXL_SW_SEID_MAC_END) {
3666 sbuf_printf(s, "MAC %2d",
3667 seid - IXL_SW_SEID_MAC_START);
3668 goto out;
3669 } else if (seid >= IXL_SW_SEID_PF_START &&
3670 seid <= IXL_SW_SEID_PF_END) {
3671 sbuf_printf(s, "PF %3d",
3672 seid - IXL_SW_SEID_PF_START);
3673 goto out;
3674 } else if (seid >= IXL_SW_SEID_VF_START &&
3675 seid <= IXL_SW_SEID_VF_END) {
3676 sbuf_printf(s, "VF %3d",
3677 seid - IXL_SW_SEID_VF_START);
3678 goto out;
3679 }
3680
3681 switch (element_type) {
3682 case I40E_AQ_SW_ELEM_TYPE_BMC:
3683 sbuf_cat(s, "BMC");
3684 break;
3685 case I40E_AQ_SW_ELEM_TYPE_PV:
3686 sbuf_cat(s, "PV");
3687 break;
3688 case I40E_AQ_SW_ELEM_TYPE_VEB:
3689 sbuf_cat(s, "VEB");
3690 break;
3691 case I40E_AQ_SW_ELEM_TYPE_PA:
3692 sbuf_cat(s, "PA");
3693 break;
3694 case I40E_AQ_SW_ELEM_TYPE_VSI:
3695 sbuf_printf(s, "VSI");
3696 break;
3697 default:
3698 sbuf_cat(s, "?");
3699 break;
3700 }
3701
3702out:
3703 sbuf_finish(s);
3704 return sbuf_data(s);
3705}
3706
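/* qsort(3) comparator: orders switch config elements by SEID. */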
3707static int
3708ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b)
3709{
3710 const struct i40e_aqc_switch_config_element_resp *one, *two;
3711 one = (const struct i40e_aqc_switch_config_element_resp *)a;
3712 two = (const struct i40e_aqc_switch_config_element_resp *)b;
3713
3714 return ((int)one->seid - (int)two->seid);
3715}
3716
3717static int
3718ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
3719{
3720 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3721 struct i40e_hw *hw = &pf->hw;
3722 device_t dev = pf->dev;
3723 struct sbuf *buf;
3724 struct sbuf *nmbuf;
3725 enum i40e_status_code status;
3726 int error = 0;
3727 u16 next = 0;
3728 u8 aq_buf[I40E_AQ_LARGE_BUF];
3729
3730 struct i40e_aqc_switch_config_element_resp *elem;
3731 struct i40e_aqc_get_switch_config_resp *sw_config;
3732 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
3733
3734 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3735 if (!buf) {
3736 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3737 return (ENOMEM);
3738 }
3739
3740 status = i40e_aq_get_switch_config(hw, sw_config,
3741 sizeof(aq_buf), &next, NULL);
3742 if (status) {
3743 device_printf(dev,
3744 "%s: aq_get_switch_config() error %s, aq error %s\n",
3745 __func__, i40e_stat_str(hw, status),
3746 i40e_aq_str(hw, hw->aq.asq_last_status));
3747 sbuf_delete(buf);
3748 return (EIO);
3749 }
3750 if (next)
3751 device_printf(dev, "%s: TODO: get more config with SEID %d\n",
3752 __func__, next);
3753
3754 nmbuf = sbuf_new_auto();
3755 if (!nmbuf) {
3756 device_printf(dev, "Could not allocate sbuf for name output.\n");
3757 sbuf_delete(buf);
3758 return (ENOMEM);
3759 }
3760
3761 /* Sort entries by SEID for display */
3762 qsort(sw_config->element, sw_config->header.num_reported,
3763 sizeof(struct i40e_aqc_switch_config_element_resp),
3764 &ixl_sw_cfg_elem_seid_cmp);
3765
3766 sbuf_cat(buf, "\n");
3767 /* Assuming <= 255 elements in switch */
3768 sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
3769 sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
3770 /* Exclude:
3771 * Revision -- all elements are revision 1 for now
3772 */
3773 sbuf_printf(buf,
3774 "SEID ( Name ) | Up ( Name ) | Down ( Name ) | Conn Type\n"
3775 " | | | (uplink)\n");
3776 for (int i = 0; i < sw_config->header.num_reported; i++) {
3777 elem = &sw_config->element[i];
3778
3779 // "%4d (%8s) | %8s %8s %#8x",
3780 sbuf_printf(buf, "%4d", elem->seid);
3781 sbuf_cat(buf, " ");
3782 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3783 elem->element_type, elem->seid));
3784 sbuf_cat(buf, " | ");
3785 sbuf_printf(buf, "%4d", elem->uplink_seid);
3786 sbuf_cat(buf, " ");
3787 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3788 0, elem->uplink_seid));
3789 sbuf_cat(buf, " | ");
3790 sbuf_printf(buf, "%4d", elem->downlink_seid);
3791 sbuf_cat(buf, " ");
3792 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3793 0, elem->downlink_seid));
3794 sbuf_cat(buf, " | ");
3795 sbuf_printf(buf, "%8d", elem->connection_type);
3796 if (i < sw_config->header.num_reported - 1)
3797 sbuf_cat(buf, "\n");
3798 }
3799 sbuf_delete(nmbuf);
3800
3801 error = sbuf_finish(buf);
3802 if (error)
3803 device_printf(dev, "Error finishing sbuf: %d\n", error);
3804
3805 sbuf_delete(buf);
3806
3807 return (error);
3808}
3809
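/*
 * Debug sysctl: on 802.1ad-capable devices, set the outer VLAN (switch
 * tag) ethertype used by the HW switch, then apply the change with
 * i40e_aq_set_switch_config().
 */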
3810static int
3811ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS)
3812{
3813 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3814 struct i40e_hw *hw = &pf->hw;
3815 device_t dev = pf->dev;
3816 int requested_vlan = -1;
3817 enum i40e_status_code status = 0;
3818 int error = 0;
3819
3820 error = sysctl_handle_int(oidp, &requested_vlan, 0, req);
3821 if ((error) || (req->newptr == NULL))
3822 return (error);
3823
3824 if ((hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) == 0) {
3825 device_printf(dev, "Flags disallow setting of VLANs\n");
3826 return (ENODEV);
3827 }
3828
3829 hw->switch_tag = requested_vlan;
3830 device_printf(dev,
3831 "Setting switch config to switch_tag=%04x, first_tag=%04x, second_tag=%04x\n",
3832 hw->switch_tag, hw->first_tag, hw->second_tag);
3833 status = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
3834 if (status) {
3835 device_printf(dev,
3836 "%s: aq_set_switch_config() error %s, aq error %s\n",
3837 __func__, i40e_stat_str(hw, status),
3838 i40e_aq_str(hw, hw->aq.asq_last_status));
3839 return (EIO);
3840 }
3841 return (0);
3842}
3843
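/*
 * Display the RSS hash key: read via the AQ on X722 devices, or directly
 * from the PFQF_HKEY registers otherwise.
 */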
3844static int
3845ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
3846{
3847 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3848 struct i40e_hw *hw = &pf->hw;
3849 device_t dev = pf->dev;
3850 struct sbuf *buf;
3851 int error = 0;
3852 enum i40e_status_code status;
3853 u32 reg;
3854
3855 struct i40e_aqc_get_set_rss_key_data key_data;
3856
3857 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3858 if (!buf) {
3859 device_printf(dev, "Could not allocate sbuf for output.\n");
3860 return (ENOMEM);
3861 }
3862
3863 bzero(&key_data, sizeof(key_data));
3864
3865 sbuf_cat(buf, "\n");
3866 if (hw->mac.type == I40E_MAC_X722) {
3867 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
3868 if (status)
3869 device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
3870 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
3871 } else {
3872 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
3873 reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
3874 bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
3875 }
3876 }
3877
3878 ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
3879
3880 error = sbuf_finish(buf);
3881 if (error)
3882 device_printf(dev, "Error finishing sbuf: %d\n", error);
3883 sbuf_delete(buf);
3884
3885 return (error);
3886}
3887
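/*
 * Hex-dump 'length' bytes of 'buf' into the sbuf, 16 bytes per line and
 * each line prefixed with its offset; when 'text' is true, an ASCII
 * column follows with non-printable bytes rendered as '.'.
 */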
3888static void
3889ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
3890{
3891 int i, j, k, width;
3892 char c;
3893
3894 if (length < 1 || buf == NULL) return;
3895
3896 int byte_stride = 16;
3897 int lines = length / byte_stride;
3898 int rem = length % byte_stride;
3899 if (rem > 0)
3900 lines++;
3901
3902 for (i = 0; i < lines; i++) {
3903 width = (rem > 0 && i == lines - 1)
3904 ? rem : byte_stride;
3905
3906 sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
3907
3908 for (j = 0; j < width; j++)
3909 sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
3910
3911 if (width < byte_stride) {
3912 for (k = 0; k < (byte_stride - width); k++)
3913 sbuf_printf(sb, " ");
3914 }
3915
3916 if (!text) {
3917 sbuf_printf(sb, "\n");
3918 continue;
3919 }
3920
3921 for (j = 0; j < width; j++) {
3922 c = (char)buf[i * byte_stride + j];
3923 if (c < 32 || c > 126)
3924 sbuf_printf(sb, ".");
3925 else
3926 sbuf_printf(sb, "%c", c);
3927
3928 if (j == width - 1)
3929 sbuf_printf(sb, "\n");
3930 }
3931 }
3932}
3933
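/*
 * Display the RSS lookup table: read via the AQ on X722 devices, or
 * directly from the PFQF_HLUT registers otherwise.
 */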
3934static int
3935ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
3936{
3937 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3938 struct i40e_hw *hw = &pf->hw;
3939 device_t dev = pf->dev;
3940 struct sbuf *buf;
3941 int error = 0;
3942 enum i40e_status_code status;
3943 u8 hlut[512];
3944 u32 reg;
3945
3946 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3947 if (!buf) {
3948 device_printf(dev, "Could not allocate sbuf for output.\n");
3949 return (ENOMEM);
3950 }
3951
3952 bzero(hlut, sizeof(hlut));
3953 sbuf_cat(buf, "\n");
3954 if (hw->mac.type == I40E_MAC_X722) {
3955 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
3956 if (status)
3957 device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
3958 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
3959 } else {
3960 for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
3961 reg = rd32(hw, I40E_PFQF_HLUT(i));
3962 bcopy(&reg, &hlut[i << 2], 4);
3963 }
3964 }
3965 ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
3966
3967 error = sbuf_finish(buf);
3968 if (error)
3969 device_printf(dev, "Error finishing sbuf: %d\n", error);
3970 sbuf_delete(buf);
3971
3972 return (error);
3973}
3974
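/* Report the two 32-bit RSS hash-enable (HENA) registers as one 64-bit value. */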
3975static int
3976ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
3977{
3978 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3979 struct i40e_hw *hw = &pf->hw;
3980 u64 hena;
3981
3982 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
3983 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
3984
3985 return sysctl_handle_long(oidp, NULL, hena, req);
3986}
3987
3988/*
3989 * Sysctl to disable firmware's link management
3990 *
3991 * 1 - Disable link management on this port
3992 * 0 - Re-enable link management
3993 *
3994 * On normal NVMs, firmware manages link by default.
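 *
 * Example usage (sysctl path and unit number are illustrative):
 *   sysctl dev.ixl.0.debug.disable_fw_link_management=1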
3995 */
3996static int
3997ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
3998{
3999 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4000 struct i40e_hw *hw = &pf->hw;
4001 device_t dev = pf->dev;
4002 int requested_mode = -1;
4003 enum i40e_status_code status = 0;
4004 int error = 0;
4005
4006 /* Read in new mode */
4007 error = sysctl_handle_int(oidp, &requested_mode, 0, req);
4008 if ((error) || (req->newptr == NULL))
4009 return (error);
4010 /* Check for sane value */
4011 if (requested_mode < 0 || requested_mode > 1) {
4012 device_printf(dev, "Valid modes are 0 or 1\n");
4013 return (EINVAL);
4014 }
4015
4016 /* Set new mode */
4017 status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
4018 if (status) {
4019 device_printf(dev,
4020 "%s: Error setting new phy debug mode %s,"
4021 " aq error: %s\n", __func__, i40e_stat_str(hw, status),
4022 i40e_aq_str(hw, hw->aq.asq_last_status));
4023 return (EIO);
4024 }
4025
4026 return (0);
4027}
4028
4029/*
4030 * Read some diagnostic data from a (Q)SFP+ module
4031 *
4032 * SFP A2 QSFP Lower Page
4033 * Temperature 96-97 22-23
4034 * Vcc 98-99 26-27
4035 * TX power 102-103 34-35..40-41
4036 * RX power 104-105 50-51..56-57
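 *
 * The bytes are dumped raw; interpreting them (e.g. per SFF-8472 for the
 * SFP A2 page or SFF-8636 for the QSFP lower page) is left to the reader.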
4037 */
4038static int
4039ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
4040{
4041 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4042 device_t dev = pf->dev;
4043 struct sbuf *sbuf;
4044 int error = 0;
4045 u8 output;
4046
4047 if (req->oldptr == NULL) {
4048 error = SYSCTL_OUT(req, 0, 128);
4049 return (error);
4050 }
4051
4052 error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
4053 if (error) {
4054 device_printf(dev, "Error reading from i2c\n");
4055 return (error);
4056 }
4057
4058 /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */
4059 if (output == 0x3) {
4060 /*
4061 * Check for:
4062 * - Internally calibrated data
4063 * - Diagnostic monitoring is implemented
4064 */
4065 pf->read_i2c_byte(pf, 92, 0xA0, &output);
4066 if (!(output & 0x60)) {
4067 device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
4068 return (0);
4069 }
4070
4071 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (sbuf == NULL) {
device_printf(dev, "Could not allocate sbuf for output.\n");
return (ENOMEM);
}
4072
4073 for (u8 offset = 96; offset < 100; offset++) {
4074 pf->read_i2c_byte(pf, offset, 0xA2, &output);
4075 sbuf_printf(sbuf, "%02X ", output);
4076 }
4077 for (u8 offset = 102; offset < 106; offset++) {
4078 pf->read_i2c_byte(pf, offset, 0xA2, &output);
4079 sbuf_printf(sbuf, "%02X ", output);
4080 }
4081 } else if (output == 0xD || output == 0x11) {
4082 /*
4083 * QSFP+ modules are always internally calibrated, and must indicate
4084 * what types of diagnostic monitoring are implemented
4085 */
4086 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (sbuf == NULL) {
device_printf(dev, "Could not allocate sbuf for output.\n");
return (ENOMEM);
}
4087
4088 for (u8 offset = 22; offset < 24; offset++) {
4089 pf->read_i2c_byte(pf, offset, 0xA0, &output);
4090 sbuf_printf(sbuf, "%02X ", output);
4091 }
4092 for (u8 offset = 26; offset < 28; offset++) {
4093 pf->read_i2c_byte(pf, offset, 0xA0, &output);
4094 sbuf_printf(sbuf, "%02X ", output);
4095 }
4096 /* Read the data from the first lane */
4097 for (u8 offset = 34; offset < 36; offset++) {
4098 pf->read_i2c_byte(pf, offset, 0xA0, &output);
4099 sbuf_printf(sbuf, "%02X ", output);
4100 }
4101 for (u8 offset = 50; offset < 52; offset++) {
4102 pf->read_i2c_byte(pf, offset, 0xA0, &output);
4103 sbuf_printf(sbuf, "%02X ", output);
4104 }
4105 } else {
4106 device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output);
4107 return (0);
4108 }
4109
4110 sbuf_finish(sbuf);
4111 sbuf_delete(sbuf);
4112
4113 return (0);
4114}
4115
4116/*
4117 * Sysctl to read a byte from I2C bus.
4118 *
4119 * Input: 32-bit value:
4120 * bits 0-7: device address (0xA0 or 0xA2)
4121 * bits 8-15: offset (0-255)
4122 * bits 16-31: unused
4123 * Output: 8-bit value read
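 *
 * Example: to read offset 0x60 from device address 0xA2, write the value
 * 0x60A2 ((0x60 << 8) | 0xA2) to this sysctl.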
4124 */
4125static int
4126ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4127{
4128 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4129 device_t dev = pf->dev;
4130 int input = -1, error = 0;
4131 u8 dev_addr, offset, output;
4132
4133 /* Read in I2C read parameters */
4134 error = sysctl_handle_int(oidp, &input, 0, req);
4135 if ((error) || (req->newptr == NULL))
4136 return (error);
4137 /* Validate device address */
4138 dev_addr = input & 0xFF;
4139 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4140 return (EINVAL);
4141 }
4142 offset = (input >> 8) & 0xFF;
4143
4144 error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
4145 if (error)
4146 return (error);
4147
4148 device_printf(dev, "%02X\n", output);
4149 return (0);
4150}
4151
4152/*
4153 * Sysctl to write a byte to the I2C bus.
4154 *
4155 * Input: 32-bit value:
4156 * bits 0-7: device address (0xA0 or 0xA2)
4157 * bits 8-15: offset (0-255)
4158 * bits 16-23: value to write
4159 * bits 24-31: unused
4160 * Output: 8-bit value written
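 *
 * Example: to write 0xAB at offset 0x60 on device address 0xA2, write the
 * value 0xAB60A2 ((0xAB << 16) | (0x60 << 8) | 0xA2) to this sysctl.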
4161 */
4162static int
4163ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4164{
4165 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4166 device_t dev = pf->dev;
4167 int input = -1, error = 0;
4168 u8 dev_addr, offset, value;
4169
4170 /* Read in I2C write parameters */
4171 error = sysctl_handle_int(oidp, &input, 0, req);
4172 if ((error) || (req->newptr == NULL))
4173 return (error);
4174 /* Validate device address */
4175 dev_addr = input & 0xFF;
4176 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4177 return (EINVAL);
4178 }
4179 offset = (input >> 8) & 0xFF;
4180 value = (input >> 16) & 0xFF;
4181
4182 error = pf->write_i2c_byte(pf, offset, dev_addr, value);
4183 if (error)
4184 return (error);
4185
4186 device_printf(dev, "%02X written\n", value);
4187 return (0);
4188}
4189
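/*
 * Helper for the FEC sysctls below: read the current PHY abilities and
 * report in *is_set whether the FEC bit given by bit_pos is enabled.
 * Returns EIO if the device is in recovery mode or the AQ read fails.
 */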
4190static int
4191ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4192 u8 bit_pos, int *is_set)
4193{
4194 device_t dev = pf->dev;
4195 struct i40e_hw *hw = &pf->hw;
4196 enum i40e_status_code status;
4197
4198 if (IXL_PF_IN_RECOVERY_MODE(pf))
4199 return (EIO);
4200
4201 status = i40e_aq_get_phy_capabilities(hw,
4202 FALSE, FALSE, abilities, NULL);
4203 if (status) {
4204 device_printf(dev,
4205 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4206 __func__, i40e_stat_str(hw, status),
4207 i40e_aq_str(hw, hw->aq.asq_last_status));
4208 return (EIO);
4209 }
4210
4211 *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
4212 return (0);
4213}
4214
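/*
 * Companion setter: clear or set the FEC bit given by bit_pos, issuing
 * i40e_aq_set_phy_config() only when the value actually changes.
 */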
4215static int
4216ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4217 u8 bit_pos, int set)
4218{
4219 device_t dev = pf->dev;
4220 struct i40e_hw *hw = &pf->hw;
4221 struct i40e_aq_set_phy_config config;
4222 enum i40e_status_code status;
4223
4224 /* Set new PHY config */
4225 memset(&config, 0, sizeof(config));
4226 config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
4227 if (set)
4228 config.fec_config |= bit_pos;
4229 if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
4230 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4231 config.phy_type = abilities->phy_type;
4232 config.phy_type_ext = abilities->phy_type_ext;
4233 config.link_speed = abilities->link_speed;
4234 config.eee_capability = abilities->eee_capability;
4235 config.eeer = abilities->eeer_val;
4236 config.low_power_ctrl = abilities->d3_lpan;
4237 status = i40e_aq_set_phy_config(hw, &config, NULL);
4238
4239 if (status) {
4240 device_printf(dev,
4241 "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4242 __func__, i40e_stat_str(hw, status),
4243 i40e_aq_str(hw, hw->aq.asq_last_status));
4244 return (EIO);
4245 }
4246 }
4247
4248 return (0);
4249}
4250
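/*
 * The five FEC sysctl handlers below share one pattern: read the current
 * bit with ixl_get_fec_config(), let the user override it, then apply the
 * result with ixl_set_fec_config().
 */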
4251static int
4252ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4253{
4254 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4255 int mode, error = 0;
4256
4257 struct i40e_aq_get_phy_abilities_resp abilities;
4258 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
4259 if (error)
4260 return (error);
4261 /* Read in new mode */
4262 error = sysctl_handle_int(oidp, &mode, 0, req);
4263 if ((error) || (req->newptr == NULL))
4264 return (error);
4265
4266 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
4267}
4268
4269static int
4270ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4271{
4272 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4273 int mode, error = 0;
4274
4275 struct i40e_aq_get_phy_abilities_resp abilities;
4276 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
4277 if (error)
4278 return (error);
4279 /* Read in new mode */
4280 error = sysctl_handle_int(oidp, &mode, 0, req);
4281 if ((error) || (req->newptr == NULL))
4282 return (error);
4283
4284 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
4285}
4286
4287static int
4288ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4289{
4290 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4291 int mode, error = 0;
4292
4293 struct i40e_aq_get_phy_abilities_resp abilities;
4294 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
4295 if (error)
4296 return (error);
4297 /* Read in new mode */
4298 error = sysctl_handle_int(oidp, &mode, 0, req);
4299 if ((error) || (req->newptr == NULL))
4300 return (error);
4301
4302 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
4303}
4304
4305static int
4306ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4307{
4308 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4309 int mode, error = 0;
4310
4311 struct i40e_aq_get_phy_abilities_resp abilities;
4312 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
4313 if (error)
4314 return (error);
4315 /* Read in new mode */
4316 error = sysctl_handle_int(oidp, &mode, 0, req);
4317 if ((error) || (req->newptr == NULL))
4318 return (error);
4319
4320 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
4321}
4322
4323static int
4324ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4325{
4326 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4327 int mode, error = 0;
4328
4329 struct i40e_aq_get_phy_abilities_resp abilities;
4330 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4331 if (error)
4332 return (error);
4333 /* Read in new mode */
4334 error = sysctl_handle_int(oidp, &mode, 0, req);
4335 if ((error) || (req->newptr == NULL))
4336 return (error);
4337
4338 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
4339}
4340
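/*
 * Dump firmware debug data for cluster 1 by looping over
 * i40e_aq_debug_dump(); each completed table is hex-dumped into the sbuf.
 */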
4341static int
4342ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4343{
4344 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4345 struct i40e_hw *hw = &pf->hw;
4346 device_t dev = pf->dev;
4347 struct sbuf *buf;
4348 int error = 0;
4349 enum i40e_status_code status;
4350
4351 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4352 if (!buf) {
4353 device_printf(dev, "Could not allocate sbuf for output.\n");
4354 return (ENOMEM);
4355 }
4356
4357 u8 *final_buff;
4358 /* This amount is only necessary if reading the entire cluster into memory */
4359#define IXL_FINAL_BUFF_SIZE (1280 * 1024)
4360 final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_IXL, M_NOWAIT | M_ZERO);
4361 if (final_buff == NULL) {
4362 device_printf(dev, "Could not allocate memory for output.\n");
4363 sbuf_delete(buf);
return (ENOMEM);
4364 }
4365 int final_buff_len = 0;
4366
4367 u8 cluster_id = 1;
4368 bool more = true;
4369
4370 u8 dump_buf[4096];
4371 u16 curr_buff_size = 4096;
4372 u8 curr_next_table = 0;
4373 u32 curr_next_index = 0;
4374
4375 u16 ret_buff_size;
4376 u8 ret_next_table;
4377 u32 ret_next_index;
4378
4379 sbuf_cat(buf, "\n");
4380
4381 while (more) {
4382 status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4383 dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4384 if (status) {
4385 device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4386 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4387 goto free_out;
4388 }
4389
4390 /* copy info out of temp buffer */
4391 bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4392 final_buff_len += ret_buff_size;
4393
4394 if (ret_next_table != curr_next_table) {
4395 /* We're done with the current table; we can dump out read data. */
4396 sbuf_printf(buf, "%d:", curr_next_table);
4397 int bytes_printed = 0;
4398 while (bytes_printed < final_buff_len) {
4399 sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
4400 bytes_printed += 16;
4401 }
4402 sbuf_cat(buf, "\n");
4403
4404 /* The entire cluster has been read; we're finished */
4405 if (ret_next_table == 0xFF)
4406 break;
4407
4408 /* Otherwise clear the output buffer and continue reading */
4409 bzero(final_buff, IXL_FINAL_BUFF_SIZE);
4410 final_buff_len = 0;
4411 }
4412
4413 if (ret_next_index == 0xFFFFFFFF)
4414 ret_next_index = 0;
4415
4416 bzero(dump_buf, sizeof(dump_buf));
4417 curr_next_table = ret_next_table;
4418 curr_next_index = ret_next_index;
4419 }
4420
4421free_out:
4422 free(final_buff, M_IXL);
4424 error = sbuf_finish(buf);
4425 if (error)
4426 device_printf(dev, "Error finishing sbuf: %d\n", error);
4427 sbuf_delete(buf);
4428
4429 return (error);
4430}
4431
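/*
 * Ask firmware to start its LLDP agent; "already running" (EEXIST) is
 * treated as success. Clears IXL_PF_STATE_FW_LLDP_DISABLED on success.
 */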
4432static int
4433ixl_start_fw_lldp(struct ixl_pf *pf)
4434{
4435 struct i40e_hw *hw = &pf->hw;
4436 enum i40e_status_code status;
4437
4438 status = i40e_aq_start_lldp(hw, false, NULL);
4439 if (status != I40E_SUCCESS) {
4440 switch (hw->aq.asq_last_status) {
4441 case I40E_AQ_RC_EEXIST:
4442 device_printf(pf->dev,
4443 "FW LLDP agent is already running\n");
4444 break;
4445 case I40E_AQ_RC_EPERM:
4446 device_printf(pf->dev,
4447 "Device configuration forbids SW from starting "
4448 "the LLDP agent. Set the \"LLDP Agent\" UEFI HII "
4449 "attribute to \"Enabled\" to use this sysctl\n");
4450 return (EINVAL);
4451 default:
4452 device_printf(pf->dev,
4453 "Starting FW LLDP agent failed: error: %s, %s\n",
4454 i40e_stat_str(hw, status),
4455 i40e_aq_str(hw, hw->aq.asq_last_status));
4456 return (EINVAL);
4457 }
4458 }
4459
4460 atomic_clear_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4461 return (0);
4462}
4463
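/*
 * Ask firmware to stop its LLDP agent; not permitted on NPAR-enabled
 * devices or on firmware without I40E_HW_FLAG_FW_LLDP_STOPPABLE.
 */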
4464static int
4465ixl_stop_fw_lldp(struct ixl_pf *pf)
4466{
4467 struct i40e_hw *hw = &pf->hw;
4468 device_t dev = pf->dev;
4469 enum i40e_status_code status;
4470
4471 if (hw->func_caps.npar_enable != 0) {
4472 device_printf(dev,
4473 "Disabling FW LLDP agent is not supported on this device\n");
4474 return (EINVAL);
4475 }
4476
4477 if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) {
4478 device_printf(dev,
4479 "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
4480 return (EINVAL);
4481 }
4482
4483 status = i40e_aq_stop_lldp(hw, true, false, NULL);
4484 if (status != I40E_SUCCESS) {
4485 if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) {
4486 device_printf(dev,
4487 "Disabling FW LLDP agent failed: error: %s, %s\n",
4488 i40e_stat_str(hw, status),
4489 i40e_aq_str(hw, hw->aq.asq_last_status));
4490 return (EINVAL);
4491 }
4492
4493 device_printf(dev, "FW LLDP agent is already stopped\n");
4494 }
4495
4496 i40e_aq_set_dcb_parameters(hw, true, NULL);
4497 atomic_set_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4498 return (0);
4499}
4500
4501static int
4502ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4503{
4504 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4505 int state, new_state, error = 0;
4506
4507 state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
4508
4509 /* Read in new mode */
4510 error = sysctl_handle_int(oidp, &new_state, 0, req);
4511 if ((error) || (req->newptr == NULL))
4512 return (error);
4513
4514 /* Already in requested state */
4515 if (new_state == state)
4516 return (error);
4517
4518 if (new_state == 0)
4519 return ixl_stop_fw_lldp(pf);
4520
4521 return ixl_start_fw_lldp(pf);
4522}
4523
4524static int
4525ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4526{
4527 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4528 int state, new_state;
4529 int sysctl_handle_status = 0;
4530 enum i40e_status_code cmd_status;
4531
4532 /* Init states' values */
4533 state = new_state = (!!(pf->state & IXL_PF_STATE_EEE_ENABLED));
4534
4535 /* Get requested mode */
4536 sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req);
4537 if ((sysctl_handle_status) || (req->newptr == NULL))
4538 return (sysctl_handle_status);
4539
4540 /* Check if state has changed */
4541 if (new_state == state)
4542 return (0);
4543
4544 /* Set new state */
4545 cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state));
4546
4547 /* Save new state or report error */
4548 if (!cmd_status) {
4549 if (new_state == 0)
4550 atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
4551 else
4552 atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
4553 } else if (cmd_status == I40E_ERR_CONFIG)
4554 return (EPERM);
4555 else
4556 return (EIO);
4557
4558 return (0);
4559}
4560
4561static int
4562ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS)
4563{
4564 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4565 int error, state;
4566
4567 state = !!(atomic_load_acq_32(&pf->state) &
4568 IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);
4569
4570 error = sysctl_handle_int(oidp, &state, 0, req);
4571 if ((error) || (req->newptr == NULL))
4572 return (error);
4573
4574 if (state == 0)
4575 atomic_clear_32(&pf->state, IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);
4576 else
4577 atomic_set_32(&pf->state, IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);
4578
4579 return (0);
4580}
4581
4582
4583int
4584ixl_attach_get_link_status(struct ixl_pf *pf)
4585{
4586 struct i40e_hw *hw = &pf->hw;
4587 device_t dev = pf->dev;
4588 int error = 0;
4589
4590 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4591 (hw->aq.fw_maj_ver < 4)) {
4592 i40e_msec_delay(75);
4593 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4594 if (error) {
4595 device_printf(dev, "link restart failed, aq_err=%d\n",
4596 pf->hw.aq.asq_last_status);
4597 return (error);
4598 }
4599 }
4600
4601 /* Determine link state */
4602 hw->phy.get_link_info = TRUE;
4603 i40e_get_link_status(hw, &pf->link_up);
4604
4605 /* Flow Control mode not set by user, read current FW settings */
4606 if (pf->fc == -1)
4607 pf->fc = hw->fc.current_mode;
4608
4609 return (0);
4610}
4611
4612static int
4613ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4614{
4615 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4616 int requested = 0, error = 0;
4617
4618 /* Read in new mode */
4619 error = sysctl_handle_int(oidp, &requested, 0, req);
4620 if ((error) || (req->newptr == NULL))
4621 return (error);
4622
4623 /* Initiate the PF reset later in the admin task */
4624 atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
4625
4626 return (error);
4627}
4628
4629static int
4630ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4631{
4632 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4633 struct i40e_hw *hw = &pf->hw;
4634 int requested = 0, error = 0;
4635
4636 /* Read in new mode */
4637 error = sysctl_handle_int(oidp, &requested, 0, req);
4638 if ((error) || (req->newptr == NULL))
4639 return (error);
4640
4641 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
4642
4643 return (error);
4644}
4645
4646static int
4647ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4648{
4649 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4650 struct i40e_hw *hw = &pf->hw;
4651 int requested = 0, error = 0;
4652
4653 /* Read in new mode */
4654 error = sysctl_handle_int(oidp, &requested, 0, req);
4655 if ((error) || (req->newptr == NULL))
4656 return (error);
4657
4658 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
4659
4660 return (error);
4661}
4662
4663/*
4664 * Print out the mapping of TX queue indexes and RX queue indexes
4665 * to MSI-X vectors.
4666 */
4667static int
4668ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
4669{
4670 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4671 struct ixl_vsi *vsi = &pf->vsi;
4672 device_t dev = pf->dev;
4673 struct sbuf *buf;
4674 int error = 0;
4675
4676 struct ixl_rx_queue *rx_que = vsi->rx_queues;
4677 struct ixl_tx_queue *tx_que = vsi->tx_queues;
4678
4679 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4680 if (!buf) {
4681 device_printf(dev, "Could not allocate sbuf for output.\n");
4682 return (ENOMEM);
4683 }
4684
4685 sbuf_cat(buf, "\n");
4686 for (int i = 0; i < vsi->num_rx_queues; i++) {
4687 rx_que = &vsi->rx_queues[i];
4688 sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
4689 }
4690 for (int i = 0; i < vsi->num_tx_queues; i++) {
4691 tx_que = &vsi->tx_queues[i];
4692 sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
4693 }
4694
4695 error = sbuf_finish(buf);
4696 if (error)
4697 device_printf(dev, "Error finishing sbuf: %d\n", error);
4698 sbuf_delete(buf);
4699
4700 return (error);
4701}
1923
1924 ixl_prepare_for_reset(pf, is_up);
1925 /*
1926 * i40e_pf_reset checks the type of reset and acts
1927 * accordingly. If EMP or Core reset was performed
1928 * doing PF reset is not necessary and it sometimes
1929 * fails.
1930 */
1931 ixl_pf_reset(pf);
1932
1933 if (!IXL_PF_IN_RECOVERY_MODE(pf) &&
1934 ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) {
1935 atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
1936 device_printf(pf->dev,
1937 "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
1938 pf->link_up = FALSE;
1939 ixl_update_link_status(pf);
1940 }
1941
1942 ixl_rebuild_hw_structs_after_reset(pf, is_up);
1943
1944 atomic_clear_32(&pf->state, IXL_PF_STATE_RESETTING);
1945}
1946
1947void
1948ixl_update_stats_counters(struct ixl_pf *pf)
1949{
1950 struct i40e_hw *hw = &pf->hw;
1951 struct ixl_vsi *vsi = &pf->vsi;
1952 struct ixl_vf *vf;
1953 u64 prev_link_xoff_rx = pf->stats.link_xoff_rx;
1954
1955 struct i40e_hw_port_stats *nsd = &pf->stats;
1956 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
1957
1958 /* Update hw stats */
1959 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
1960 pf->stat_offsets_loaded,
1961 &osd->crc_errors, &nsd->crc_errors);
1962 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
1963 pf->stat_offsets_loaded,
1964 &osd->illegal_bytes, &nsd->illegal_bytes);
1965 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
1966 I40E_GLPRT_GORCL(hw->port),
1967 pf->stat_offsets_loaded,
1968 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
1969 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
1970 I40E_GLPRT_GOTCL(hw->port),
1971 pf->stat_offsets_loaded,
1972 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
1973 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
1974 pf->stat_offsets_loaded,
1975 &osd->eth.rx_discards,
1976 &nsd->eth.rx_discards);
1977 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
1978 I40E_GLPRT_UPRCL(hw->port),
1979 pf->stat_offsets_loaded,
1980 &osd->eth.rx_unicast,
1981 &nsd->eth.rx_unicast);
1982 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
1983 I40E_GLPRT_UPTCL(hw->port),
1984 pf->stat_offsets_loaded,
1985 &osd->eth.tx_unicast,
1986 &nsd->eth.tx_unicast);
1987 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
1988 I40E_GLPRT_MPRCL(hw->port),
1989 pf->stat_offsets_loaded,
1990 &osd->eth.rx_multicast,
1991 &nsd->eth.rx_multicast);
1992 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
1993 I40E_GLPRT_MPTCL(hw->port),
1994 pf->stat_offsets_loaded,
1995 &osd->eth.tx_multicast,
1996 &nsd->eth.tx_multicast);
1997 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
1998 I40E_GLPRT_BPRCL(hw->port),
1999 pf->stat_offsets_loaded,
2000 &osd->eth.rx_broadcast,
2001 &nsd->eth.rx_broadcast);
2002 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
2003 I40E_GLPRT_BPTCL(hw->port),
2004 pf->stat_offsets_loaded,
2005 &osd->eth.tx_broadcast,
2006 &nsd->eth.tx_broadcast);
2007
2008 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
2009 pf->stat_offsets_loaded,
2010 &osd->tx_dropped_link_down,
2011 &nsd->tx_dropped_link_down);
2012 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
2013 pf->stat_offsets_loaded,
2014 &osd->mac_local_faults,
2015 &nsd->mac_local_faults);
2016 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2017 pf->stat_offsets_loaded,
2018 &osd->mac_remote_faults,
2019 &nsd->mac_remote_faults);
2020 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2021 pf->stat_offsets_loaded,
2022 &osd->rx_length_errors,
2023 &nsd->rx_length_errors);
2024
2025 /* Flow control (LFC) stats */
2026 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2027 pf->stat_offsets_loaded,
2028 &osd->link_xon_rx, &nsd->link_xon_rx);
2029 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2030 pf->stat_offsets_loaded,
2031 &osd->link_xon_tx, &nsd->link_xon_tx);
2032 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2033 pf->stat_offsets_loaded,
2034 &osd->link_xoff_rx, &nsd->link_xoff_rx);
2035 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2036 pf->stat_offsets_loaded,
2037 &osd->link_xoff_tx, &nsd->link_xoff_tx);
2038
2039 /*
2040 * For watchdog management we need to know if we have been paused
2041 * during the last interval, so capture that here.
2042 */
2043 if (pf->stats.link_xoff_rx != prev_link_xoff_rx)
2044 vsi->shared->isc_pause_frames = 1;
2045
2046 /* Packet size stats rx */
2047 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
2048 I40E_GLPRT_PRC64L(hw->port),
2049 pf->stat_offsets_loaded,
2050 &osd->rx_size_64, &nsd->rx_size_64);
2051 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
2052 I40E_GLPRT_PRC127L(hw->port),
2053 pf->stat_offsets_loaded,
2054 &osd->rx_size_127, &nsd->rx_size_127);
2055 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
2056 I40E_GLPRT_PRC255L(hw->port),
2057 pf->stat_offsets_loaded,
2058 &osd->rx_size_255, &nsd->rx_size_255);
2059 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
2060 I40E_GLPRT_PRC511L(hw->port),
2061 pf->stat_offsets_loaded,
2062 &osd->rx_size_511, &nsd->rx_size_511);
2063 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
2064 I40E_GLPRT_PRC1023L(hw->port),
2065 pf->stat_offsets_loaded,
2066 &osd->rx_size_1023, &nsd->rx_size_1023);
2067 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
2068 I40E_GLPRT_PRC1522L(hw->port),
2069 pf->stat_offsets_loaded,
2070 &osd->rx_size_1522, &nsd->rx_size_1522);
2071 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
2072 I40E_GLPRT_PRC9522L(hw->port),
2073 pf->stat_offsets_loaded,
2074 &osd->rx_size_big, &nsd->rx_size_big);
2075
2076 /* Packet size stats tx */
2077 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
2078 I40E_GLPRT_PTC64L(hw->port),
2079 pf->stat_offsets_loaded,
2080 &osd->tx_size_64, &nsd->tx_size_64);
2081 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
2082 I40E_GLPRT_PTC127L(hw->port),
2083 pf->stat_offsets_loaded,
2084 &osd->tx_size_127, &nsd->tx_size_127);
2085 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
2086 I40E_GLPRT_PTC255L(hw->port),
2087 pf->stat_offsets_loaded,
2088 &osd->tx_size_255, &nsd->tx_size_255);
2089 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
2090 I40E_GLPRT_PTC511L(hw->port),
2091 pf->stat_offsets_loaded,
2092 &osd->tx_size_511, &nsd->tx_size_511);
2093 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
2094 I40E_GLPRT_PTC1023L(hw->port),
2095 pf->stat_offsets_loaded,
2096 &osd->tx_size_1023, &nsd->tx_size_1023);
2097 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
2098 I40E_GLPRT_PTC1522L(hw->port),
2099 pf->stat_offsets_loaded,
2100 &osd->tx_size_1522, &nsd->tx_size_1522);
2101 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
2102 I40E_GLPRT_PTC9522L(hw->port),
2103 pf->stat_offsets_loaded,
2104 &osd->tx_size_big, &nsd->tx_size_big);
2105
2106 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2107 pf->stat_offsets_loaded,
2108 &osd->rx_undersize, &nsd->rx_undersize);
2109 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2110 pf->stat_offsets_loaded,
2111 &osd->rx_fragments, &nsd->rx_fragments);
2112 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2113 pf->stat_offsets_loaded,
2114 &osd->rx_oversize, &nsd->rx_oversize);
2115 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2116 pf->stat_offsets_loaded,
2117 &osd->rx_jabber, &nsd->rx_jabber);
2118 /* EEE */
2119 i40e_get_phy_lpi_status(hw, nsd);
2120
2121 i40e_lpi_stat_update(hw, pf->stat_offsets_loaded,
2122 &osd->tx_lpi_count, &nsd->tx_lpi_count,
2123 &osd->rx_lpi_count, &nsd->rx_lpi_count);
2124
2125 pf->stat_offsets_loaded = true;
2126 /* End hw stats */
2127
2128 /* Update vsi stats */
2129 ixl_update_vsi_stats(vsi);
2130
2131 for (int i = 0; i < pf->num_vfs; i++) {
2132 vf = &pf->vfs[i];
2133 if (vf->vf_flags & VF_FLAG_ENABLED)
2134 ixl_update_eth_stats(&pf->vfs[i].vsi);
2135 }
2136}
2137
2138/**
2139 * Update VSI-specific ethernet statistics counters.
2140 **/
2141void
2142ixl_update_eth_stats(struct ixl_vsi *vsi)
2143{
2144 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2145 struct i40e_hw *hw = &pf->hw;
2146 struct i40e_eth_stats *es;
2147 struct i40e_eth_stats *oes;
2148 u16 stat_idx = vsi->info.stat_counter_idx;
2149
2150 es = &vsi->eth_stats;
2151 oes = &vsi->eth_stats_offsets;
2152
2153 /* Gather up the stats that the hw collects */
2154 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2155 vsi->stat_offsets_loaded,
2156 &oes->tx_errors, &es->tx_errors);
2157 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2158 vsi->stat_offsets_loaded,
2159 &oes->rx_discards, &es->rx_discards);
2160
2161 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
2162 I40E_GLV_GORCL(stat_idx),
2163 vsi->stat_offsets_loaded,
2164 &oes->rx_bytes, &es->rx_bytes);
2165 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
2166 I40E_GLV_UPRCL(stat_idx),
2167 vsi->stat_offsets_loaded,
2168 &oes->rx_unicast, &es->rx_unicast);
2169 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
2170 I40E_GLV_MPRCL(stat_idx),
2171 vsi->stat_offsets_loaded,
2172 &oes->rx_multicast, &es->rx_multicast);
2173 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
2174 I40E_GLV_BPRCL(stat_idx),
2175 vsi->stat_offsets_loaded,
2176 &oes->rx_broadcast, &es->rx_broadcast);
2177
2178 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
2179 I40E_GLV_GOTCL(stat_idx),
2180 vsi->stat_offsets_loaded,
2181 &oes->tx_bytes, &es->tx_bytes);
2182 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
2183 I40E_GLV_UPTCL(stat_idx),
2184 vsi->stat_offsets_loaded,
2185 &oes->tx_unicast, &es->tx_unicast);
2186 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
2187 I40E_GLV_MPTCL(stat_idx),
2188 vsi->stat_offsets_loaded,
2189 &oes->tx_multicast, &es->tx_multicast);
2190 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
2191 I40E_GLV_BPTCL(stat_idx),
2192 vsi->stat_offsets_loaded,
2193 &oes->tx_broadcast, &es->tx_broadcast);
2194 vsi->stat_offsets_loaded = true;
2195}
2196
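/*
 * Refresh the VSI ethernet stats and derive the ifnet counters from
 * them, folding in per-queue checksum errors and port-level drops.
 */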
2197void
2198ixl_update_vsi_stats(struct ixl_vsi *vsi)
2199{
2200 struct ixl_pf *pf;
2201 struct i40e_eth_stats *es;
2202 u64 tx_discards, csum_errs;
2203
2204 struct i40e_hw_port_stats *nsd;
2205
2206 pf = vsi->back;
2207 es = &vsi->eth_stats;
2208 nsd = &pf->stats;
2209
2210 ixl_update_eth_stats(vsi);
2211
2212 tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
2213
2214 csum_errs = 0;
2215 for (int i = 0; i < vsi->num_rx_queues; i++)
2216 csum_errs += vsi->rx_queues[i].rxr.csum_errs;
2217 nsd->checksum_error = csum_errs;
2218
2219 /* Update ifnet stats */
2220 IXL_SET_IPACKETS(vsi, es->rx_unicast +
2221 es->rx_multicast +
2222 es->rx_broadcast);
2223 IXL_SET_OPACKETS(vsi, es->tx_unicast +
2224 es->tx_multicast +
2225 es->tx_broadcast);
2226 IXL_SET_IBYTES(vsi, es->rx_bytes);
2227 IXL_SET_OBYTES(vsi, es->tx_bytes);
2228 IXL_SET_IMCASTS(vsi, es->rx_multicast);
2229 IXL_SET_OMCASTS(vsi, es->tx_multicast);
2230
2231 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
2232 nsd->checksum_error + nsd->rx_length_errors +
2233 nsd->rx_undersize + nsd->rx_fragments + nsd->rx_oversize +
2234 nsd->rx_jabber);
2235 IXL_SET_OERRORS(vsi, es->tx_errors);
2236 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
2237 IXL_SET_OQDROPS(vsi, tx_discards);
2238 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
2239 IXL_SET_COLLISIONS(vsi, 0);
2240}
2241
2242/**
2243 * Reset all of the stats for the given pf
2244 **/
2245void
2246ixl_pf_reset_stats(struct ixl_pf *pf)
2247{
2248 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
2249 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
2250 pf->stat_offsets_loaded = false;
2251}
2252
2253/**
2254 * Resets all stats of the given vsi
2255 **/
2256void
2257ixl_vsi_reset_stats(struct ixl_vsi *vsi)
2258{
2259 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
2260 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
2261 vsi->stat_offsets_loaded = false;
2262}
2263
2264/**
2265 * Read and update a 48 bit stat from the hw
2266 *
2267 * Since the device stats are not reset at PFReset, they likely will not
2268 * be zeroed when the driver starts. We'll save the first values read
2269 * and use them as offsets to be subtracted from the raw values in order
2270 * to report stats that count from zero.
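 *
 * Example: with a 48-bit counter, an offset of 0xFFFFFFFFFFF0 and a new
 * raw reading of 0x10 yield (0x10 + 2^48) - 0xFFFFFFFFFFF0 = 0x20.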
2271 **/
2272void
2273ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
2274 bool offset_loaded, u64 *offset, u64 *stat)
2275{
2276 u64 new_data;
2277
2278#if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
2279 new_data = rd64(hw, loreg);
2280#else
2281 /*
2282 * Use two rd32's instead of one rd64; FreeBSD versions before
2283 * 10 don't support 64-bit bus reads/writes.
2284 */
2285 new_data = rd32(hw, loreg);
2286 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
2287#endif
2288
2289 if (!offset_loaded)
2290 *offset = new_data;
2291 if (new_data >= *offset)
2292 *stat = new_data - *offset;
2293 else
2294 *stat = (new_data + ((u64)1 << 48)) - *offset;
2295 *stat &= 0xFFFFFFFFFFFFULL;
2296}
2297
2298/**
2299 * Read and update a 32 bit stat from the hw
2300 **/
2301void
2302ixl_stat_update32(struct i40e_hw *hw, u32 reg,
2303 bool offset_loaded, u64 *offset, u64 *stat)
2304{
2305 u32 new_data;
2306
2307 new_data = rd32(hw, reg);
2308 if (!offset_loaded)
2309 *offset = new_data;
2310 if (new_data >= *offset)
2311 *stat = (u32)(new_data - *offset);
2312 else
2313 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
2314}
2315
2316/**
2317 * Add subset of device sysctls safe to use in recovery mode
2318 */
2319void
2320ixl_add_sysctls_recovery_mode(struct ixl_pf *pf)
2321{
2322 device_t dev = pf->dev;
2323
2324 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2325 struct sysctl_oid_list *ctx_list =
2326 SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2327
2328 struct sysctl_oid *debug_node;
2329 struct sysctl_oid_list *debug_list;
2330
2331 SYSCTL_ADD_PROC(ctx, ctx_list,
2332 OID_AUTO, "fw_version",
2333 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2334 ixl_sysctl_show_fw, "A", "Firmware version");
2335
2336 /* Add sysctls meant to print debug information, but don't list them
2337 * in "sysctl -a" output. */
2338 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2339 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2340 "Debug Sysctls");
2341 debug_list = SYSCTL_CHILDREN(debug_node);
2342
2343 SYSCTL_ADD_UINT(ctx, debug_list,
2344 OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2345 &pf->hw.debug_mask, 0, "Shared code debug message level");
2346
2347 SYSCTL_ADD_UINT(ctx, debug_list,
2348 OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2349 &pf->dbg_mask, 0, "Non-shared code debug message level");
2350
2351 SYSCTL_ADD_PROC(ctx, debug_list,
2352 OID_AUTO, "dump_debug_data",
2353 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2354 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2355
2356 SYSCTL_ADD_PROC(ctx, debug_list,
2357 OID_AUTO, "do_pf_reset",
2358 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2359 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2360
2361 SYSCTL_ADD_PROC(ctx, debug_list,
2362 OID_AUTO, "do_core_reset",
2363 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2364 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2365
2366 SYSCTL_ADD_PROC(ctx, debug_list,
2367 OID_AUTO, "do_global_reset",
2368 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2369 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2370
2371 SYSCTL_ADD_PROC(ctx, debug_list,
2372 OID_AUTO, "queue_interrupt_table",
2373 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2374 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2375}
2376
2377void
2378ixl_add_device_sysctls(struct ixl_pf *pf)
2379{
2380 device_t dev = pf->dev;
2381 struct i40e_hw *hw = &pf->hw;
2382
2383 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2384 struct sysctl_oid_list *ctx_list =
2385 SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2386
2387 struct sysctl_oid *debug_node;
2388 struct sysctl_oid_list *debug_list;
2389
2390 struct sysctl_oid *fec_node;
2391 struct sysctl_oid_list *fec_list;
2392 struct sysctl_oid *eee_node;
2393 struct sysctl_oid_list *eee_list;
2394
2395 /* Set up sysctls */
2396 SYSCTL_ADD_PROC(ctx, ctx_list,
2397 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2398 pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
2399
2400 SYSCTL_ADD_PROC(ctx, ctx_list,
2401 OID_AUTO, "advertise_speed",
2402 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2403 ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
2404
2405 SYSCTL_ADD_PROC(ctx, ctx_list,
2406 OID_AUTO, "supported_speeds",
2407 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2408 ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
2409
2410 SYSCTL_ADD_PROC(ctx, ctx_list,
2411 OID_AUTO, "current_speed",
2412 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2413 ixl_sysctl_current_speed, "A", "Current Port Speed");
2414
2415 SYSCTL_ADD_PROC(ctx, ctx_list,
2416 OID_AUTO, "fw_version",
2417 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2418 ixl_sysctl_show_fw, "A", "Firmware version");
2419
2420 SYSCTL_ADD_PROC(ctx, ctx_list,
2421 OID_AUTO, "unallocated_queues",
2422 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2423 ixl_sysctl_unallocated_queues, "I",
2424 "Queues not allocated to a PF or VF");
2425
2426 SYSCTL_ADD_PROC(ctx, ctx_list,
2427 OID_AUTO, "tx_itr",
2428 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2429 ixl_sysctl_pf_tx_itr, "I",
2430 "Immediately set TX ITR value for all queues");
2431
2432 SYSCTL_ADD_PROC(ctx, ctx_list,
2433 OID_AUTO, "rx_itr",
2434 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2435 ixl_sysctl_pf_rx_itr, "I",
2436 "Immediately set RX ITR value for all queues");
2437
2438 SYSCTL_ADD_INT(ctx, ctx_list,
2439 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
2440 &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
2441
2442 SYSCTL_ADD_INT(ctx, ctx_list,
2443 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
2444 &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
2445
2446 /* Add FEC sysctls for 25G adapters */
2447 if (i40e_is_25G_device(hw->device_id)) {
2448 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2449 OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2450 "FEC Sysctls");
2451 fec_list = SYSCTL_CHILDREN(fec_node);
2452
2453 SYSCTL_ADD_PROC(ctx, fec_list,
2454 OID_AUTO, "fc_ability",
2455 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2456 ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
2457
2458 SYSCTL_ADD_PROC(ctx, fec_list,
2459 OID_AUTO, "rs_ability",
2460 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2461 ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
2462
2463 SYSCTL_ADD_PROC(ctx, fec_list,
2464 OID_AUTO, "fc_requested",
2465 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2466 ixl_sysctl_fec_fc_request, "I",
2467 "FC FEC mode requested on link");
2468
2469 SYSCTL_ADD_PROC(ctx, fec_list,
2470 OID_AUTO, "rs_requested",
2471 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2472 ixl_sysctl_fec_rs_request, "I",
2473 "RS FEC mode requested on link");
2474
2475 SYSCTL_ADD_PROC(ctx, fec_list,
2476 OID_AUTO, "auto_fec_enabled",
2477 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2478 ixl_sysctl_fec_auto_enable, "I",
2479 "Let FW decide FEC ability/request modes");
2480 }
2481
2482 SYSCTL_ADD_PROC(ctx, ctx_list,
2483 OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2484 pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
2485
2486 eee_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2487 OID_AUTO, "eee", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2488 "Energy Efficient Ethernet (EEE) Sysctls");
2489 eee_list = SYSCTL_CHILDREN(eee_node);
2490
2491 SYSCTL_ADD_PROC(ctx, eee_list,
2492 OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
2493 pf, 0, ixl_sysctl_eee_enable, "I",
2494 "Enable Energy Efficient Ethernet (EEE)");
2495
2496 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status",
2497 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_status, 0,
2498 "TX LPI status");
2499
2500 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status",
2501 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_status, 0,
2502 "RX LPI status");
2503
2504 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count",
2505 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_count,
2506 "TX LPI count");
2507
2508 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count",
2509 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_count,
2510 "RX LPI count");
2511
2512 SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO,
2513 "link_active_on_if_down",
2514 CTLTYPE_INT | CTLFLAG_RWTUN,
2515 pf, 0, ixl_sysctl_set_link_active, "I",
2516 IXL_SYSCTL_HELP_SET_LINK_ACTIVE);
2517
2518 /* Add sysctls meant to print debug information, but don't list them
2519 * in "sysctl -a" output. */
2520 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2521 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2522 "Debug Sysctls");
2523 debug_list = SYSCTL_CHILDREN(debug_node);
2524
2525 SYSCTL_ADD_UINT(ctx, debug_list,
2526 OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2527 &pf->hw.debug_mask, 0, "Shared code debug message level");
2528
2529 SYSCTL_ADD_UINT(ctx, debug_list,
2530 OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2531 &pf->dbg_mask, 0, "Non-shared code debug message level");
2532
2533 SYSCTL_ADD_PROC(ctx, debug_list,
2534 OID_AUTO, "link_status",
2535 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2536 pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
2537
2538 SYSCTL_ADD_PROC(ctx, debug_list,
2539 OID_AUTO, "phy_abilities_init",
2540 CTLTYPE_STRING | CTLFLAG_RD,
2541 pf, 1, ixl_sysctl_phy_abilities, "A", "Initial PHY Abilities");
2542
2543 SYSCTL_ADD_PROC(ctx, debug_list,
2544 OID_AUTO, "phy_abilities",
2545 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2546 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
2547
2548 SYSCTL_ADD_PROC(ctx, debug_list,
2549 OID_AUTO, "filter_list",
2550 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2551 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
2552
2553 SYSCTL_ADD_PROC(ctx, debug_list,
2554 OID_AUTO, "hw_res_alloc",
2555 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2556 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
2557
2558 SYSCTL_ADD_PROC(ctx, debug_list,
2559 OID_AUTO, "switch_config",
2560 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2561 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
2562
2563 SYSCTL_ADD_PROC(ctx, debug_list,
2564 OID_AUTO, "switch_vlans",
2565 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2566 pf, 0, ixl_sysctl_switch_vlans, "I", "HW Switch VLAN Configuration");
2567
2568 SYSCTL_ADD_PROC(ctx, debug_list,
2569 OID_AUTO, "rss_key",
2570 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2571 pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
2572
2573 SYSCTL_ADD_PROC(ctx, debug_list,
2574 OID_AUTO, "rss_lut",
2575 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2576 pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
2577
2578 SYSCTL_ADD_PROC(ctx, debug_list,
2579 OID_AUTO, "rss_hena",
2580 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2581 pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
2582
2583 SYSCTL_ADD_PROC(ctx, debug_list,
2584 OID_AUTO, "disable_fw_link_management",
2585 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2586 pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
2587
2588 SYSCTL_ADD_PROC(ctx, debug_list,
2589 OID_AUTO, "dump_debug_data",
2590 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2591 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2592
2593 SYSCTL_ADD_PROC(ctx, debug_list,
2594 OID_AUTO, "do_pf_reset",
2595 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2596 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2597
2598 SYSCTL_ADD_PROC(ctx, debug_list,
2599 OID_AUTO, "do_core_reset",
2600 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2601 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2602
2603 SYSCTL_ADD_PROC(ctx, debug_list,
2604 OID_AUTO, "do_global_reset",
2605 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2606 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2607
2608 SYSCTL_ADD_PROC(ctx, debug_list,
2609 OID_AUTO, "queue_interrupt_table",
2610 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2611 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2612
2613 if (pf->has_i2c) {
2614 SYSCTL_ADD_PROC(ctx, debug_list,
2615 OID_AUTO, "read_i2c_byte",
2616 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2617 pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
2618
2619 SYSCTL_ADD_PROC(ctx, debug_list,
2620 OID_AUTO, "write_i2c_byte",
2621 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2622 pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
2623
2624 SYSCTL_ADD_PROC(ctx, debug_list,
2625 OID_AUTO, "read_i2c_diag_data",
2626 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2627 pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
2628 }
2629}
2630
2631/*
2632  * Primarily for finding out, at runtime, how many queues can be
2633  * assigned to VFs.
2634 */
2635static int
2636ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
2637{
2638 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2639 int queues;
2640
2641 queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
2642
2643 return sysctl_handle_int(oidp, NULL, queues, req);
2644}
2645
2646static const char *
2647ixl_link_speed_string(enum i40e_aq_link_speed link_speed)
2648{
2649 	static const char * link_speed_str[] = {
2650 "Unknown",
2651 "100 Mbps",
2652 "1 Gbps",
2653 "10 Gbps",
2654 "40 Gbps",
2655 "20 Gbps",
2656 "25 Gbps",
2657 "2.5 Gbps",
2658 "5 Gbps"
2659 };
2660 int index;
2661
2662 switch (link_speed) {
2663 case I40E_LINK_SPEED_100MB:
2664 index = 1;
2665 break;
2666 case I40E_LINK_SPEED_1GB:
2667 index = 2;
2668 break;
2669 case I40E_LINK_SPEED_10GB:
2670 index = 3;
2671 break;
2672 case I40E_LINK_SPEED_40GB:
2673 index = 4;
2674 break;
2675 case I40E_LINK_SPEED_20GB:
2676 index = 5;
2677 break;
2678 case I40E_LINK_SPEED_25GB:
2679 index = 6;
2680 break;
2681 case I40E_LINK_SPEED_2_5GB:
2682 index = 7;
2683 break;
2684 case I40E_LINK_SPEED_5GB:
2685 index = 8;
2686 break;
2687 case I40E_LINK_SPEED_UNKNOWN:
2688 default:
2689 index = 0;
2690 break;
2691 }
2692
2693 return (link_speed_str[index]);
2694}
2695
2696int
2697ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
2698{
2699 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2700 struct i40e_hw *hw = &pf->hw;
2701 int error = 0;
2702
2703 ixl_update_link_status(pf);
2704
2705 error = sysctl_handle_string(oidp,
2706 __DECONST(void *,
2707 ixl_link_speed_string(hw->phy.link_info.link_speed)),
2708 8, req);
2709
2710 return (error);
2711}
2712
2713/*
2714  * Converts an 8-bit speed bitmap between sysctl flags and
2715  * Admin Queue flags.
2716 */
2717static u8
2718ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
2719{
2720#define SPEED_MAP_SIZE 8
2721 static u16 speedmap[SPEED_MAP_SIZE] = {
2722 (I40E_LINK_SPEED_100MB | (0x1 << 8)),
2723 (I40E_LINK_SPEED_1GB | (0x2 << 8)),
2724 (I40E_LINK_SPEED_10GB | (0x4 << 8)),
2725 (I40E_LINK_SPEED_20GB | (0x8 << 8)),
2726 (I40E_LINK_SPEED_25GB | (0x10 << 8)),
2727 (I40E_LINK_SPEED_40GB | (0x20 << 8)),
2728 (I40E_LINK_SPEED_2_5GB | (0x40 << 8)),
2729 (I40E_LINK_SPEED_5GB | (0x80 << 8)),
2730 };
2731 u8 retval = 0;
2732
2733 for (int i = 0; i < SPEED_MAP_SIZE; i++) {
2734 if (to_aq)
2735 retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
2736 else
2737 retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
2738 }
2739
2740 return (retval);
2741}
2742
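/*
 * Worked example of the mapping above: a sysctl mask of 0x6 (1G | 10G)
 * converts, with to_aq == true, into I40E_LINK_SPEED_1GB |
 * I40E_LINK_SPEED_10GB, and converting that AQ mask back with
 * to_aq == false yields 0x6 again.
 */
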
2743int
2744ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
2745{
2746 struct i40e_hw *hw = &pf->hw;
2747 device_t dev = pf->dev;
2748 struct i40e_aq_get_phy_abilities_resp abilities;
2749 struct i40e_aq_set_phy_config config;
2750 enum i40e_status_code aq_error = 0;
2751
2752 /* Get current capability information */
2753 aq_error = i40e_aq_get_phy_capabilities(hw,
2754 FALSE, FALSE, &abilities, NULL);
2755 if (aq_error) {
2756 device_printf(dev,
2757 "%s: Error getting phy capabilities %d,"
2758 " aq error: %d\n", __func__, aq_error,
2759 hw->aq.asq_last_status);
2760 return (EIO);
2761 }
2762
2763 /* Prepare new config */
2764 bzero(&config, sizeof(config));
2765 if (from_aq)
2766 config.link_speed = speeds;
2767 else
2768 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
2769 config.phy_type = abilities.phy_type;
2770 config.phy_type_ext = abilities.phy_type_ext;
2771 config.abilities = abilities.abilities
2772 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2773 config.eee_capability = abilities.eee_capability;
2774 config.eeer = abilities.eeer_val;
2775 config.low_power_ctrl = abilities.d3_lpan;
2776 config.fec_config = abilities.fec_cfg_curr_mod_ext_info
2777 & I40E_AQ_PHY_FEC_CONFIG_MASK;
2778
2779 /* Do aq command & restart link */
2780 aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
2781 if (aq_error) {
2782 device_printf(dev,
2783 "%s: Error setting new phy config %d,"
2784 " aq error: %d\n", __func__, aq_error,
2785 hw->aq.asq_last_status);
2786 return (EIO);
2787 }
2788
2789 return (0);
2790}
2791
2792/*
2793** Supported link speeds
2794** Flags:
2795** 0x1 - 100 Mb
2796** 0x2 - 1G
2797** 0x4 - 10G
2798** 0x8 - 20G
2799** 0x10 - 25G
2800** 0x20 - 40G
2801** 0x40 - 2.5G
2802** 0x80 - 5G
2803*/
2804static int
2805ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
2806{
2807 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2808 int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
2809
2810 return sysctl_handle_int(oidp, NULL, supported, req);
2811}
2812
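/*
 * For example, a returned value of 0x30 (0x10 | 0x20) means the
 * adapter supports the 25G and 40G link speeds.
 */
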
2813/*
2814** Control link advertise speed:
2815** Flags:
2816** 0x1 - advertise 100 Mb
2817** 0x2 - advertise 1G
2818** 0x4 - advertise 10G
2819** 0x8 - advertise 20G
2820** 0x10 - advertise 25G
2821** 0x20 - advertise 40G
2822** 0x40 - advertise 2.5G
2823** 0x80 - advertise 5G
2824**
2825** Set to 0 to disable link
2826*/
2827int
2828ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
2829{
2830 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2831 device_t dev = pf->dev;
2832 u8 converted_speeds;
2833 int requested_ls = 0;
2834 int error = 0;
2835
2836 /* Read in new mode */
2837 requested_ls = pf->advertised_speed;
2838 error = sysctl_handle_int(oidp, &requested_ls, 0, req);
2839 if ((error) || (req->newptr == NULL))
2840 return (error);
2841 if (IXL_PF_IN_RECOVERY_MODE(pf)) {
2842 device_printf(dev, "Interface is currently in FW recovery mode. "
2843 "Setting advertise speed not supported\n");
2844 return (EINVAL);
2845 }
2846
2847 /* Error out if bits outside of possible flag range are set */
2848 if ((requested_ls & ~((u8)0xFF)) != 0) {
2849 device_printf(dev, "Input advertised speed out of range; "
2850 "valid flags are: 0x%02x\n",
2851 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
2852 return (EINVAL);
2853 }
2854
2855 /* Check if adapter supports input value */
2856 converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
2857 if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
2858 device_printf(dev, "Invalid advertised speed; "
2859 "valid flags are: 0x%02x\n",
2860 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
2861 return (EINVAL);
2862 }
2863
2864 error = ixl_set_advertised_speeds(pf, requested_ls, false);
2865 if (error)
2866 return (error);
2867
2868 pf->advertised_speed = requested_ls;
2869 ixl_update_link_status(pf);
2870 return (0);
2871}
2872
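/*
 * Usage sketch (the OID path is assumed here; the handler above is
 * typically attached as dev.ixl.<unit>.advertise_speed):
 *
 *   sysctl dev.ixl.0.advertise_speed=0x4   # advertise 10G only
 *   sysctl dev.ixl.0.advertise_speed=0     # disable link
 */
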
2873/*
2874 * Input: bitmap of enum i40e_aq_link_speed
2875 */
2876u64
2877ixl_max_aq_speed_to_value(u8 link_speeds)
2878{
2879 if (link_speeds & I40E_LINK_SPEED_40GB)
2880 return IF_Gbps(40);
2881 if (link_speeds & I40E_LINK_SPEED_25GB)
2882 return IF_Gbps(25);
2883 if (link_speeds & I40E_LINK_SPEED_20GB)
2884 return IF_Gbps(20);
2885 if (link_speeds & I40E_LINK_SPEED_10GB)
2886 return IF_Gbps(10);
2887 if (link_speeds & I40E_LINK_SPEED_5GB)
2888 return IF_Gbps(5);
2889 if (link_speeds & I40E_LINK_SPEED_2_5GB)
2890 return IF_Mbps(2500);
2891 if (link_speeds & I40E_LINK_SPEED_1GB)
2892 return IF_Gbps(1);
2893 if (link_speeds & I40E_LINK_SPEED_100MB)
2894 return IF_Mbps(100);
2895 else
2896 /* Minimum supported link speed */
2897 return IF_Mbps(100);
2898}
2899
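/*
 * For example, given I40E_LINK_SPEED_40GB | I40E_LINK_SPEED_1GB the
 * first match wins and IF_Gbps(40) is returned; an empty bitmap falls
 * through to the 100 Mbps minimum.
 */
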
2900/*
2901** Get the width and transaction speed of
2902** the bus this adapter is plugged into.
2903*/
2904void
2905ixl_get_bus_info(struct ixl_pf *pf)
2906{
2907 struct i40e_hw *hw = &pf->hw;
2908 device_t dev = pf->dev;
2909 u16 link;
2910 u32 offset, num_ports;
2911 u64 max_speed;
2912
2913 /* Some devices don't use PCIE */
2914 if (hw->mac.type == I40E_MAC_X722)
2915 return;
2916
2917 /* Read PCI Express Capabilities Link Status Register */
2918 pci_find_cap(dev, PCIY_EXPRESS, &offset);
2919 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
2920
2921 /* Fill out hw struct with PCIE info */
2922 i40e_set_pci_config_data(hw, link);
2923
2924 /* Use info to print out bandwidth messages */
2925 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
2926 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
2927 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
2928 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
2929 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
2930 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
2931 (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
2932 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
2933 ("Unknown"));
2934
2935 /*
2936 * If adapter is in slot with maximum supported speed,
2937 * no warning message needs to be printed out.
2938 */
2939 if (hw->bus.speed >= i40e_bus_speed_8000
2940 && hw->bus.width >= i40e_bus_width_pcie_x8)
2941 return;
2942
2943 num_ports = bitcount32(hw->func_caps.valid_functions);
2944 max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
2945
2946 if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
2947 device_printf(dev, "PCI-Express bandwidth available"
2948 " for this device may be insufficient for"
2949 " optimal performance.\n");
2950 device_printf(dev, "Please move the device to a different"
2951 " PCI-e link with more lanes and/or higher"
2952 " transfer rate.\n");
2953 }
2954}
2955
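/*
 * Worked example of the bandwidth check above, assuming the
 * i40e_bus_speed enum values are the literal transfer rates (e.g.
 * i40e_bus_speed_8000 == 8000): a single 40G port needs 40000 Mb/s,
 * while a x4 slot at 8.0GT/s offers 8000 * 4 = 32000, so both
 * warnings print; a x8 slot at 8.0GT/s returns early before the
 * check is reached.
 */
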
2956static int
2957ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
2958{
2959 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2960 struct i40e_hw *hw = &pf->hw;
2961 struct sbuf *sbuf;
2962
2963 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
2964 ixl_nvm_version_str(hw, sbuf);
2965 sbuf_finish(sbuf);
2966 sbuf_delete(sbuf);
2967
2968 return (0);
2969}
2970
2971void
2972ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
2973{
2974 u8 nvma_ptr = nvma->config & 0xFF;
2975 u8 nvma_flags = (nvma->config & 0xF00) >> 8;
2976 const char * cmd_str;
2977
2978 switch (nvma->command) {
2979 case I40E_NVM_READ:
2980 if (nvma_ptr == 0xF && nvma_flags == 0xF &&
2981 nvma->offset == 0 && nvma->data_size == 1) {
2982 device_printf(dev, "NVMUPD: Get Driver Status Command\n");
2983 return;
2984 }
2985 cmd_str = "READ ";
2986 break;
2987 case I40E_NVM_WRITE:
2988 cmd_str = "WRITE";
2989 break;
2990 default:
2991 device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command);
2992 return;
2993 }
2994 device_printf(dev,
2995 "NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n",
2996 cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size);
2997}
2998
2999int
3000ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
3001{
3002 struct i40e_hw *hw = &pf->hw;
3003 struct i40e_nvm_access *nvma;
3004 device_t dev = pf->dev;
3005 enum i40e_status_code status = 0;
3006 size_t nvma_size, ifd_len, exp_len;
3007 int err, perrno;
3008
3009 DEBUGFUNC("ixl_handle_nvmupd_cmd");
3010
3011 /* Sanity checks */
3012 nvma_size = sizeof(struct i40e_nvm_access);
3013 ifd_len = ifd->ifd_len;
3014
3015 if (ifd_len < nvma_size ||
3016 ifd->ifd_data == NULL) {
3017 device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
3018 __func__);
3019 device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
3020 __func__, ifd_len, nvma_size);
3021 device_printf(dev, "%s: data pointer: %p\n", __func__,
3022 ifd->ifd_data);
3023 return (EINVAL);
3024 }
3025
3026 nvma = malloc(ifd_len, M_IXL, M_WAITOK);
3027 err = copyin(ifd->ifd_data, nvma, ifd_len);
3028 if (err) {
3029 device_printf(dev, "%s: Cannot get request from user space\n",
3030 __func__);
3031 free(nvma, M_IXL);
3032 return (err);
3033 }
3034
3035 if (pf->dbg_mask & IXL_DBG_NVMUPD)
3036 ixl_print_nvm_cmd(dev, nvma);
3037
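	/*
	 * If an EMP reset is in progress, poll for up to ~10 seconds
	 * (100 iterations of 100 ms) for it to finish before touching
	 * the NVM.
	 */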
3038 if (IXL_PF_IS_RESETTING(pf)) {
3039 int count = 0;
3040 while (count++ < 100) {
3041 i40e_msec_delay(100);
3042 if (!(IXL_PF_IS_RESETTING(pf)))
3043 break;
3044 }
3045 }
3046
3047 if (IXL_PF_IS_RESETTING(pf)) {
3048 device_printf(dev,
3049 "%s: timeout waiting for EMP reset to finish\n",
3050 __func__);
3051 free(nvma, M_IXL);
3052 return (-EBUSY);
3053 }
3054
3055 if (nvma->data_size < 1 || nvma->data_size > 4096) {
3056 device_printf(dev,
3057 "%s: invalid request, data size not in supported range\n",
3058 __func__);
3059 free(nvma, M_IXL);
3060 return (EINVAL);
3061 }
3062
3063 /*
3064 * Older versions of the NVM update tool don't set ifd_len to the size
3065 * of the entire buffer passed to the ioctl. Check the data_size field
3066 * in the contained i40e_nvm_access struct and ensure everything is
3067 * copied in from userspace.
3068 */
3069 exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */
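	/*
	 * For example, a data_size of 512 needs sizeof(struct
	 * i40e_nvm_access) + 511 bytes, since the first data byte
	 * already lives in the struct's trailing one-byte array.
	 */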
3070
3071 if (ifd_len < exp_len) {
3072 ifd_len = exp_len;
3073 nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK);
3074 err = copyin(ifd->ifd_data, nvma, ifd_len);
3075 if (err) {
3076 device_printf(dev, "%s: Cannot get request from user space\n",
3077 __func__);
3078 free(nvma, M_IXL);
3079 return (err);
3080 }
3081 }
3082
3083 // TODO: Might need a different lock here
3084 // IXL_PF_LOCK(pf);
3085 status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3086 // IXL_PF_UNLOCK(pf);
3087
3088 err = copyout(nvma, ifd->ifd_data, ifd_len);
3089 free(nvma, M_IXL);
3090 if (err) {
3091 device_printf(dev, "%s: Cannot return data to user space\n",
3092 __func__);
3093 return (err);
3094 }
3095
3096 /* Let the nvmupdate report errors, show them only when debug is enabled */
3097 if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
3098 device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3099 i40e_stat_str(hw, status), perrno);
3100
3101 /*
3102 * -EPERM is actually ERESTART, which the kernel interprets as it needing
3103 * to run this ioctl again. So use -EACCES for -EPERM instead.
3104 */
3105 if (perrno == -EPERM)
3106 return (-EACCES);
3107 else
3108 return (perrno);
3109}
3110
3111int
3112ixl_find_i2c_interface(struct ixl_pf *pf)
3113{
3114 struct i40e_hw *hw = &pf->hw;
3115 bool i2c_en, port_matched;
3116 u32 reg;
3117
3118 for (int i = 0; i < 4; i++) {
3119 reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3120 i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3121 port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3122 >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3123 & BIT(hw->port);
3124 if (i2c_en && port_matched)
3125 return (i);
3126 }
3127
3128 return (-1);
3129}
3130
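/*
 * For example, if I40E_GLGEN_MDIO_I2C_SEL(2) has its I2C enable bit
 * set and its port-number field carries BIT(hw->port), the function
 * returns 2; -1 means no I2C interface is wired to this port.
 */
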
3131void
3132ixl_set_link(struct ixl_pf *pf, bool enable)
3133{
3134 struct i40e_hw *hw = &pf->hw;
3135 device_t dev = pf->dev;
3136 struct i40e_aq_get_phy_abilities_resp abilities;
3137 struct i40e_aq_set_phy_config config;
3138 enum i40e_status_code aq_error = 0;
3139 u32 phy_type, phy_type_ext;
3140
3141 /* Get initial capability information */
3142 aq_error = i40e_aq_get_phy_capabilities(hw,
3143 FALSE, TRUE, &abilities, NULL);
3144 if (aq_error) {
3145 device_printf(dev,
3146 "%s: Error getting phy capabilities %d,"
3147 " aq error: %d\n", __func__, aq_error,
3148 hw->aq.asq_last_status);
3149 return;
3150 }
3151
3152 phy_type = abilities.phy_type;
3153 phy_type_ext = abilities.phy_type_ext;
3154
3155 /* Get current capability information */
3156 aq_error = i40e_aq_get_phy_capabilities(hw,
3157 FALSE, FALSE, &abilities, NULL);
3158 if (aq_error) {
3159 device_printf(dev,
3160 "%s: Error getting phy capabilities %d,"
3161 " aq error: %d\n", __func__, aq_error,
3162 hw->aq.asq_last_status);
3163 return;
3164 }
3165
3166 /* Prepare new config */
3167 memset(&config, 0, sizeof(config));
3168 config.link_speed = abilities.link_speed;
3169 config.abilities = abilities.abilities;
3170 config.eee_capability = abilities.eee_capability;
3171 config.eeer = abilities.eeer_val;
3172 config.low_power_ctrl = abilities.d3_lpan;
3173 config.fec_config = abilities.fec_cfg_curr_mod_ext_info
3174 & I40E_AQ_PHY_FEC_CONFIG_MASK;
3175 config.phy_type = 0;
3176 config.phy_type_ext = 0;
3177
3178 config.abilities &= ~(I40E_AQ_PHY_FLAG_PAUSE_TX |
3179 I40E_AQ_PHY_FLAG_PAUSE_RX);
3180
3181 switch (pf->fc) {
3182 case I40E_FC_FULL:
3183 config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX |
3184 I40E_AQ_PHY_FLAG_PAUSE_RX;
3185 break;
3186 case I40E_FC_RX_PAUSE:
3187 config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_RX;
3188 break;
3189 case I40E_FC_TX_PAUSE:
3190 config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX;
3191 break;
3192 default:
3193 break;
3194 }
3195
3196 if (enable) {
3197 config.phy_type = phy_type;
3198 config.phy_type_ext = phy_type_ext;
3199
3200 }
3201
3202 aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
3203 if (aq_error) {
3204 device_printf(dev,
3205 "%s: Error setting new phy config %d,"
3206 " aq error: %d\n", __func__, aq_error,
3207 hw->aq.asq_last_status);
3208 return;
3209 }
3210
3211 aq_error = i40e_aq_set_link_restart_an(hw, enable, NULL);
3212 if (aq_error) {
3213 device_printf(dev,
3214 "%s: Error set link config %d,"
3215 " aq error: %d\n", __func__, aq_error,
3216 hw->aq.asq_last_status);
3217 return;
3218 }
3219}
3220
3221static char *
3222ixl_phy_type_string(u32 bit_pos, bool ext)
3223{
3224 static char * phy_types_str[32] = {
3225 "SGMII",
3226 "1000BASE-KX",
3227 "10GBASE-KX4",
3228 "10GBASE-KR",
3229 "40GBASE-KR4",
3230 "XAUI",
3231 "XFI",
3232 "SFI",
3233 "XLAUI",
3234 "XLPPI",
3235 "40GBASE-CR4",
3236 "10GBASE-CR1",
3237 "SFP+ Active DA",
3238 "QSFP+ Active DA",
3239 "Reserved (14)",
3240 "Reserved (15)",
3241 "Reserved (16)",
3242 "100BASE-TX",
3243 "1000BASE-T",
3244 "10GBASE-T",
3245 "10GBASE-SR",
3246 "10GBASE-LR",
3247 "10GBASE-SFP+Cu",
3248 "10GBASE-CR1",
3249 "40GBASE-CR4",
3250 "40GBASE-SR4",
3251 "40GBASE-LR4",
3252 "1000BASE-SX",
3253 "1000BASE-LX",
3254 "1000BASE-T Optical",
3255 "20GBASE-KR2",
3256 "Reserved (31)"
3257 };
3258 static char * ext_phy_types_str[8] = {
3259 "25GBASE-KR",
3260 "25GBASE-CR",
3261 "25GBASE-SR",
3262 "25GBASE-LR",
3263 "25GBASE-AOC",
3264 "25GBASE-ACC",
3265 "2.5GBASE-T",
3266 "5GBASE-T"
3267 };
3268
3269 if (ext && bit_pos > 7) return "Invalid_Ext";
3270 if (bit_pos > 31) return "Invalid";
3271
3272 return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3273}
3274
3275 /* TODO: ERJ: I don't think this is necessary anymore. */
3276int
3277ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3278{
3279 device_t dev = pf->dev;
3280 struct i40e_hw *hw = &pf->hw;
3281 struct i40e_aq_desc desc;
3282 enum i40e_status_code status;
3283
3284 struct i40e_aqc_get_link_status *aq_link_status =
3285 (struct i40e_aqc_get_link_status *)&desc.params.raw;
3286
3287 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
3288 link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
3289 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3290 if (status) {
3291 device_printf(dev,
3292 "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3293 __func__, i40e_stat_str(hw, status),
3294 i40e_aq_str(hw, hw->aq.asq_last_status));
3295 return (EIO);
3296 }
3297
3298 bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
3299 return (0);
3300}
3301
3302static char *
3303ixl_phy_type_string_ls(u8 val)
3304{
3305 if (val >= 0x1F)
3306 return ixl_phy_type_string(val - 0x1F, true);
3307 else
3308 return ixl_phy_type_string(val, false);
3309}
3310
3311static int
3312ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3313{
3314 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3315 device_t dev = pf->dev;
3316 struct sbuf *buf;
3317 int error = 0;
3318
3319 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3320 if (!buf) {
3321 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3322 return (ENOMEM);
3323 }
3324
3325 struct i40e_aqc_get_link_status link_status;
3326 error = ixl_aq_get_link_status(pf, &link_status);
3327 if (error) {
3328 sbuf_delete(buf);
3329 return (error);
3330 }
3331
3332 sbuf_printf(buf, "\n"
3333 "PHY Type : 0x%02x<%s>\n"
3334 "Speed : 0x%02x\n"
3335 "Link info: 0x%02x\n"
3336 "AN info : 0x%02x\n"
3337 "Ext info : 0x%02x\n"
3338 "Loopback : 0x%02x\n"
3339 "Max Frame: %d\n"
3340 "Config : 0x%02x\n"
3341 "Power : 0x%02x",
3342 link_status.phy_type,
3343 ixl_phy_type_string_ls(link_status.phy_type),
3344 link_status.link_speed,
3345 link_status.link_info,
3346 link_status.an_info,
3347 link_status.ext_info,
3348 link_status.loopback,
3349 link_status.max_frame_size,
3350 link_status.config,
3351 link_status.power_desc);
3352
3353 error = sbuf_finish(buf);
3354 if (error)
3355 device_printf(dev, "Error finishing sbuf: %d\n", error);
3356
3357 sbuf_delete(buf);
3358 return (error);
3359}
3360
3361static int
3362ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3363{
3364 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3365 struct i40e_hw *hw = &pf->hw;
3366 device_t dev = pf->dev;
3367 enum i40e_status_code status;
3368 struct i40e_aq_get_phy_abilities_resp abilities;
3369 struct sbuf *buf;
3370 int error = 0;
3371
3372 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3373 if (!buf) {
3374 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3375 return (ENOMEM);
3376 }
3377
3378 status = i40e_aq_get_phy_capabilities(hw,
3379 FALSE, arg2 != 0, &abilities, NULL);
3380 if (status) {
3381 device_printf(dev,
3382 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3383 __func__, i40e_stat_str(hw, status),
3384 i40e_aq_str(hw, hw->aq.asq_last_status));
3385 sbuf_delete(buf);
3386 return (EIO);
3387 }
3388
3389 sbuf_printf(buf, "\n"
3390 "PHY Type : %08x",
3391 abilities.phy_type);
3392
3393 if (abilities.phy_type != 0) {
3394 sbuf_printf(buf, "<");
3395 for (int i = 0; i < 32; i++)
3396 if ((1 << i) & abilities.phy_type)
3397 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3398 sbuf_printf(buf, ">");
3399 }
3400
3401 sbuf_printf(buf, "\nPHY Ext : %02x",
3402 abilities.phy_type_ext);
3403
3404 if (abilities.phy_type_ext != 0) {
3405 sbuf_printf(buf, "<");
3406 for (int i = 0; i < 4; i++)
3407 if ((1 << i) & abilities.phy_type_ext)
3408 sbuf_printf(buf, "%s,",
3409 ixl_phy_type_string(i, true));
3410 sbuf_printf(buf, ">");
3411 }
3412
3413 sbuf_printf(buf, "\nSpeed : %02x", abilities.link_speed);
3414 if (abilities.link_speed != 0) {
3415 u8 link_speed;
3416 sbuf_printf(buf, " <");
3417 for (int i = 0; i < 8; i++) {
3418 link_speed = (1 << i) & abilities.link_speed;
3419 if (link_speed)
3420 sbuf_printf(buf, "%s, ",
3421 ixl_link_speed_string(link_speed));
3422 }
3423 sbuf_printf(buf, ">");
3424 }
3425
3426 sbuf_printf(buf, "\n"
3427 "Abilities: %02x\n"
3428 "EEE cap : %04x\n"
3429 "EEER reg : %08x\n"
3430 "D3 Lpan : %02x\n"
3431 "ID : %02x %02x %02x %02x\n"
3432 "ModType : %02x %02x %02x\n"
3433 "ModType E: %01x\n"
3434 "FEC Cfg : %02x\n"
3435 "Ext CC : %02x",
3436 abilities.abilities, abilities.eee_capability,
3437 abilities.eeer_val, abilities.d3_lpan,
3438 abilities.phy_id[0], abilities.phy_id[1],
3439 abilities.phy_id[2], abilities.phy_id[3],
3440 abilities.module_type[0], abilities.module_type[1],
3441 abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
3442 abilities.fec_cfg_curr_mod_ext_info & 0x1F,
3443 abilities.ext_comp_code);
3444
3445 error = sbuf_finish(buf);
3446 if (error)
3447 device_printf(dev, "Error finishing sbuf: %d\n", error);
3448
3449 sbuf_delete(buf);
3450 return (error);
3451}
3452
3453static int
3454ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
3455{
3456 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3457 struct ixl_vsi *vsi = &pf->vsi;
3458 struct ixl_mac_filter *f;
3459 device_t dev = pf->dev;
3460 int error = 0, ftl_len = 0, ftl_counter = 0;
3461
3462 struct sbuf *buf;
3463
3464 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3465 if (!buf) {
3466 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3467 return (ENOMEM);
3468 }
3469
3470 sbuf_printf(buf, "\n");
3471
3472 /* Print MAC filters */
3473 sbuf_printf(buf, "PF Filters:\n");
3474 LIST_FOREACH(f, &vsi->ftl, ftle)
3475 ftl_len++;
3476
3477 if (ftl_len < 1)
3478 sbuf_printf(buf, "(none)\n");
3479 else {
3480 LIST_FOREACH(f, &vsi->ftl, ftle) {
3481 sbuf_printf(buf,
3482 MAC_FORMAT ", vlan %4d, flags %#06x",
3483 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3484 /* don't print '\n' for last entry */
3485 if (++ftl_counter != ftl_len)
3486 sbuf_printf(buf, "\n");
3487 }
3488 }
3489
3490#ifdef PCI_IOV
3491 /* TODO: Give each VF its own filter list sysctl */
3492 struct ixl_vf *vf;
3493 if (pf->num_vfs > 0) {
3494 sbuf_printf(buf, "\n\n");
3495 for (int i = 0; i < pf->num_vfs; i++) {
3496 vf = &pf->vfs[i];
3497 if (!(vf->vf_flags & VF_FLAG_ENABLED))
3498 continue;
3499
3500 vsi = &vf->vsi;
3501 ftl_len = 0, ftl_counter = 0;
3502 sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
3503 LIST_FOREACH(f, &vsi->ftl, ftle)
3504 ftl_len++;
3505
3506 if (ftl_len < 1)
3507 sbuf_printf(buf, "(none)\n");
3508 else {
3509 LIST_FOREACH(f, &vsi->ftl, ftle) {
3510 sbuf_printf(buf,
3511 MAC_FORMAT ", vlan %4d, flags %#06x\n",
3512 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3513 }
3514 }
3515 }
3516 }
3517#endif
3518
3519 error = sbuf_finish(buf);
3520 if (error)
3521 device_printf(dev, "Error finishing sbuf: %d\n", error);
3522 sbuf_delete(buf);
3523
3524 return (error);
3525}
3526
3527#define IXL_SW_RES_SIZE 0x14
3528int
3529ixl_res_alloc_cmp(const void *a, const void *b)
3530{
3531 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
3532 one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
3533 two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
3534
3535 return ((int)one->resource_type - (int)two->resource_type);
3536}
3537
3538/*
3539 * Longest string length: 25
3540 */
3541const char *
3542ixl_switch_res_type_string(u8 type)
3543{
3544 static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = {
3545 "VEB",
3546 "VSI",
3547 "Perfect Match MAC address",
3548 "S-tag",
3549 "(Reserved)",
3550 "Multicast hash entry",
3551 "Unicast hash entry",
3552 "VLAN",
3553 "VSI List entry",
3554 "(Reserved)",
3555 "VLAN Statistic Pool",
3556 "Mirror Rule",
3557 "Queue Set",
3558 "Inner VLAN Forward filter",
3559 "(Reserved)",
3560 "Inner MAC",
3561 "IP",
3562 "GRE/VN1 Key",
3563 "VN2 Key",
3564 "Tunneling Port"
3565 };
3566
3567 if (type < IXL_SW_RES_SIZE)
3568 return ixl_switch_res_type_strings[type];
3569 else
3570 return "(Reserved)";
3571}
3572
3573static int
3574ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
3575{
3576 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3577 struct i40e_hw *hw = &pf->hw;
3578 device_t dev = pf->dev;
3579 struct sbuf *buf;
3580 enum i40e_status_code status;
3581 int error = 0;
3582
3583 u8 num_entries;
3584 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
3585
3586 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3587 if (!buf) {
3588 device_printf(dev, "Could not allocate sbuf for output.\n");
3589 return (ENOMEM);
3590 }
3591
3592 bzero(resp, sizeof(resp));
3593 status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
3594 resp,
3595 IXL_SW_RES_SIZE,
3596 NULL);
3597 if (status) {
3598 device_printf(dev,
3599 "%s: get_switch_resource_alloc() error %s, aq error %s\n",
3600 __func__, i40e_stat_str(hw, status),
3601 i40e_aq_str(hw, hw->aq.asq_last_status));
3602 sbuf_delete(buf);
3603 		return (EIO);
3604 }
3605
3606 /* Sort entries by type for display */
3607 qsort(resp, num_entries,
3608 sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
3609 &ixl_res_alloc_cmp);
3610
3611 sbuf_cat(buf, "\n");
3612 sbuf_printf(buf, "# of entries: %d\n", num_entries);
3613 sbuf_printf(buf,
3614 " Type | Guaranteed | Total | Used | Un-allocated\n"
3615 " | (this) | (all) | (this) | (all) \n");
3616 for (int i = 0; i < num_entries; i++) {
3617 sbuf_printf(buf,
3618 "%25s | %10d %5d %6d %12d",
3619 ixl_switch_res_type_string(resp[i].resource_type),
3620 resp[i].guaranteed,
3621 resp[i].total,
3622 resp[i].used,
3623 resp[i].total_unalloced);
3624 if (i < num_entries - 1)
3625 sbuf_cat(buf, "\n");
3626 }
3627
3628 error = sbuf_finish(buf);
3629 if (error)
3630 device_printf(dev, "Error finishing sbuf: %d\n", error);
3631
3632 sbuf_delete(buf);
3633 return (error);
3634}
3635
3636enum ixl_sw_seid_offset {
3637 IXL_SW_SEID_EMP = 1,
3638 IXL_SW_SEID_MAC_START = 2,
3639 IXL_SW_SEID_MAC_END = 5,
3640 IXL_SW_SEID_PF_START = 16,
3641 IXL_SW_SEID_PF_END = 31,
3642 IXL_SW_SEID_VF_START = 32,
3643 IXL_SW_SEID_VF_END = 159,
3644};
3645
3646/*
3647 * Caller must init and delete sbuf; this function will clear and
3648 * finish it for caller.
3649 *
3650 * Note: The SEID argument only applies for elements defined by FW at
3651 * power-on; these include the EMP, Ports, PFs and VFs.
3652 */
3653static char *
3654ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid)
3655{
3656 sbuf_clear(s);
3657
3658 /* If SEID is in certain ranges, then we can infer the
3659 * mapping of SEID to switch element.
3660 */
3661 if (seid == IXL_SW_SEID_EMP) {
3662 sbuf_cat(s, "EMP");
3663 goto out;
3664 } else if (seid >= IXL_SW_SEID_MAC_START &&
3665 seid <= IXL_SW_SEID_MAC_END) {
3666 sbuf_printf(s, "MAC %2d",
3667 seid - IXL_SW_SEID_MAC_START);
3668 goto out;
3669 } else if (seid >= IXL_SW_SEID_PF_START &&
3670 seid <= IXL_SW_SEID_PF_END) {
3671 sbuf_printf(s, "PF %3d",
3672 seid - IXL_SW_SEID_PF_START);
3673 goto out;
3674 } else if (seid >= IXL_SW_SEID_VF_START &&
3675 seid <= IXL_SW_SEID_VF_END) {
3676 sbuf_printf(s, "VF %3d",
3677 seid - IXL_SW_SEID_VF_START);
3678 goto out;
3679 }
3680
3681 switch (element_type) {
3682 case I40E_AQ_SW_ELEM_TYPE_BMC:
3683 sbuf_cat(s, "BMC");
3684 break;
3685 case I40E_AQ_SW_ELEM_TYPE_PV:
3686 sbuf_cat(s, "PV");
3687 break;
3688 case I40E_AQ_SW_ELEM_TYPE_VEB:
3689 sbuf_cat(s, "VEB");
3690 break;
3691 case I40E_AQ_SW_ELEM_TYPE_PA:
3692 sbuf_cat(s, "PA");
3693 break;
3694 case I40E_AQ_SW_ELEM_TYPE_VSI:
3695 sbuf_printf(s, "VSI");
3696 break;
3697 default:
3698 sbuf_cat(s, "?");
3699 break;
3700 }
3701
3702out:
3703 sbuf_finish(s);
3704 return sbuf_data(s);
3705}
3706
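/*
 * For example, SEID 18 falls inside the PF range above and renders as
 * "PF   2" no matter what element_type is passed in; only SEIDs
 * outside the FW-defined ranges fall through to the type switch.
 */
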
3707static int
3708ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b)
3709{
3710 const struct i40e_aqc_switch_config_element_resp *one, *two;
3711 one = (const struct i40e_aqc_switch_config_element_resp *)a;
3712 two = (const struct i40e_aqc_switch_config_element_resp *)b;
3713
3714 return ((int)one->seid - (int)two->seid);
3715}
3716
3717static int
3718ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
3719{
3720 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3721 struct i40e_hw *hw = &pf->hw;
3722 device_t dev = pf->dev;
3723 struct sbuf *buf;
3724 struct sbuf *nmbuf;
3725 enum i40e_status_code status;
3726 int error = 0;
3727 u16 next = 0;
3728 u8 aq_buf[I40E_AQ_LARGE_BUF];
3729
3730 struct i40e_aqc_switch_config_element_resp *elem;
3731 struct i40e_aqc_get_switch_config_resp *sw_config;
3732 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
3733
3734 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3735 if (!buf) {
3736 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3737 return (ENOMEM);
3738 }
3739
3740 status = i40e_aq_get_switch_config(hw, sw_config,
3741 sizeof(aq_buf), &next, NULL);
3742 if (status) {
3743 device_printf(dev,
3744 "%s: aq_get_switch_config() error %s, aq error %s\n",
3745 __func__, i40e_stat_str(hw, status),
3746 i40e_aq_str(hw, hw->aq.asq_last_status));
3747 sbuf_delete(buf);
3748 		return (EIO);
3749 }
3750 if (next)
3751 device_printf(dev, "%s: TODO: get more config with SEID %d\n",
3752 __func__, next);
3753
3754 nmbuf = sbuf_new_auto();
3755 if (!nmbuf) {
3756 device_printf(dev, "Could not allocate sbuf for name output.\n");
3757 sbuf_delete(buf);
3758 return (ENOMEM);
3759 }
3760
3761 /* Sort entries by SEID for display */
3762 qsort(sw_config->element, sw_config->header.num_reported,
3763 sizeof(struct i40e_aqc_switch_config_element_resp),
3764 &ixl_sw_cfg_elem_seid_cmp);
3765
3766 sbuf_cat(buf, "\n");
3767 /* Assuming <= 255 elements in switch */
3768 sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
3769 sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
3770 /* Exclude:
3771 * Revision -- all elements are revision 1 for now
3772 */
3773 sbuf_printf(buf,
3774 "SEID ( Name ) | Up ( Name ) | Down ( Name ) | Conn Type\n"
3775 " | | | (uplink)\n");
3776 for (int i = 0; i < sw_config->header.num_reported; i++) {
3777 elem = &sw_config->element[i];
3778
3779 // "%4d (%8s) | %8s %8s %#8x",
3780 sbuf_printf(buf, "%4d", elem->seid);
3781 sbuf_cat(buf, " ");
3782 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3783 elem->element_type, elem->seid));
3784 sbuf_cat(buf, " | ");
3785 sbuf_printf(buf, "%4d", elem->uplink_seid);
3786 sbuf_cat(buf, " ");
3787 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3788 0, elem->uplink_seid));
3789 sbuf_cat(buf, " | ");
3790 sbuf_printf(buf, "%4d", elem->downlink_seid);
3791 sbuf_cat(buf, " ");
3792 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3793 0, elem->downlink_seid));
3794 sbuf_cat(buf, " | ");
3795 sbuf_printf(buf, "%8d", elem->connection_type);
3796 if (i < sw_config->header.num_reported - 1)
3797 sbuf_cat(buf, "\n");
3798 }
3799 sbuf_delete(nmbuf);
3800
3801 error = sbuf_finish(buf);
3802 if (error)
3803 device_printf(dev, "Error finishing sbuf: %d\n", error);
3804
3805 sbuf_delete(buf);
3806
3807 return (error);
3808}
3809
3810static int
3811ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS)
3812{
3813 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3814 struct i40e_hw *hw = &pf->hw;
3815 device_t dev = pf->dev;
3816 int requested_vlan = -1;
3817 enum i40e_status_code status = 0;
3818 int error = 0;
3819
3820 error = sysctl_handle_int(oidp, &requested_vlan, 0, req);
3821 if ((error) || (req->newptr == NULL))
3822 return (error);
3823
3824 if ((hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) == 0) {
3825 device_printf(dev, "Flags disallow setting of vlans\n");
3826 return (ENODEV);
3827 }
3828
3829 hw->switch_tag = requested_vlan;
3830 device_printf(dev,
3831 "Setting switch config to switch_tag=%04x, first_tag=%04x, second_tag=%04x\n",
3832 hw->switch_tag, hw->first_tag, hw->second_tag);
3833 status = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
3834 if (status) {
3835 device_printf(dev,
3836 "%s: aq_set_switch_config() error %s, aq error %s\n",
3837 __func__, i40e_stat_str(hw, status),
3838 i40e_aq_str(hw, hw->aq.asq_last_status));
3839 		return (EIO);
3840 }
3841 return (0);
3842}
3843
3844static int
3845ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
3846{
3847 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3848 struct i40e_hw *hw = &pf->hw;
3849 device_t dev = pf->dev;
3850 struct sbuf *buf;
3851 int error = 0;
3852 enum i40e_status_code status;
3853 u32 reg;
3854
3855 struct i40e_aqc_get_set_rss_key_data key_data;
3856
3857 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3858 if (!buf) {
3859 device_printf(dev, "Could not allocate sbuf for output.\n");
3860 return (ENOMEM);
3861 }
3862
3863 bzero(&key_data, sizeof(key_data));
3864
3865 sbuf_cat(buf, "\n");
3866 if (hw->mac.type == I40E_MAC_X722) {
3867 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
3868 if (status)
3869 device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
3870 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
3871 } else {
3872 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
3873 reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
3874 bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
3875 }
3876 }
3877
3878 ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
3879
3880 error = sbuf_finish(buf);
3881 if (error)
3882 device_printf(dev, "Error finishing sbuf: %d\n", error);
3883 sbuf_delete(buf);
3884
3885 return (error);
3886}
3887
3888static void
3889ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
3890{
3891 int i, j, k, width;
3892 char c;
3893
3894 if (length < 1 || buf == NULL) return;
3895
3896 int byte_stride = 16;
3897 int lines = length / byte_stride;
3898 int rem = length % byte_stride;
3899 if (rem > 0)
3900 lines++;
3901
3902 for (i = 0; i < lines; i++) {
3903 width = (rem > 0 && i == lines - 1)
3904 ? rem : byte_stride;
3905
3906 sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
3907
3908 for (j = 0; j < width; j++)
3909 sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
3910
3911 if (width < byte_stride) {
3912 for (k = 0; k < (byte_stride - width); k++)
3913 sbuf_printf(sb, " ");
3914 }
3915
3916 if (!text) {
3917 sbuf_printf(sb, "\n");
3918 continue;
3919 }
3920
3921 for (j = 0; j < width; j++) {
3922 c = (char)buf[i * byte_stride + j];
3923 if (c < 32 || c > 126)
3924 sbuf_printf(sb, ".");
3925 else
3926 sbuf_printf(sb, "%c", c);
3927
3928 if (j == width - 1)
3929 sbuf_printf(sb, "\n");
3930 }
3931 }
3932}
3933
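/*
 * Sample line for a 4-byte buffer at label_offset 0 with text set:
 *
 *    0 | de ad be ef <spaces padding out the 16-byte column>....
 *
 * Bytes outside the printable ASCII range show as '.' in the text
 * column.
 */
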
3934static int
3935ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
3936{
3937 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3938 struct i40e_hw *hw = &pf->hw;
3939 device_t dev = pf->dev;
3940 struct sbuf *buf;
3941 int error = 0;
3942 enum i40e_status_code status;
3943 u8 hlut[512];
3944 u32 reg;
3945
3946 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3947 if (!buf) {
3948 device_printf(dev, "Could not allocate sbuf for output.\n");
3949 return (ENOMEM);
3950 }
3951
3952 bzero(hlut, sizeof(hlut));
3953 sbuf_cat(buf, "\n");
3954 if (hw->mac.type == I40E_MAC_X722) {
3955 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
3956 if (status)
3957 device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
3958 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
3959 } else {
3960 for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
3961 reg = rd32(hw, I40E_PFQF_HLUT(i));
3962 bcopy(&reg, &hlut[i << 2], 4);
3963 }
3964 }
3965 ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
3966
3967 error = sbuf_finish(buf);
3968 if (error)
3969 device_printf(dev, "Error finishing sbuf: %d\n", error);
3970 sbuf_delete(buf);
3971
3972 return (error);
3973}
3974
3975static int
3976ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
3977{
3978 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3979 struct i40e_hw *hw = &pf->hw;
3980 u64 hena;
3981
3982 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
3983 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
3984
3985 return sysctl_handle_long(oidp, NULL, hena, req);
3986}
3987
3988/*
3989 * Sysctl to disable firmware's link management
3990 *
3991 * 1 - Disable link management on this port
3992 * 0 - Re-enable link management
3993 *
3994 * On normal NVMs, firmware manages link by default.
3995 */
3996static int
3997ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
3998{
3999 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4000 struct i40e_hw *hw = &pf->hw;
4001 device_t dev = pf->dev;
4002 int requested_mode = -1;
4003 enum i40e_status_code status = 0;
4004 int error = 0;
4005
4006 /* Read in new mode */
4007 error = sysctl_handle_int(oidp, &requested_mode, 0, req);
4008 if ((error) || (req->newptr == NULL))
4009 return (error);
4010 /* Check for sane value */
4011 if (requested_mode < 0 || requested_mode > 1) {
4012 device_printf(dev, "Valid modes are 0 or 1\n");
4013 return (EINVAL);
4014 }
4015
4016 /* Set new mode */
4017 status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
4018 if (status) {
4019 device_printf(dev,
4020 "%s: Error setting new phy debug mode %s,"
4021 " aq error: %s\n", __func__, i40e_stat_str(hw, status),
4022 i40e_aq_str(hw, hw->aq.asq_last_status));
4023 return (EIO);
4024 }
4025
4026 return (0);
4027}
4028
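/*
 * Usage sketch (the OID path prefix is assumed): writing 1 to
 * dev.ixl.<unit>.debug.disable_fw_link_management shifts the mode into
 * bit 4 of the PHY debug word passed to i40e_aq_set_phy_debug();
 * writing 0 clears it and hands link management back to firmware.
 */
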
4029/*
4030 * Read some diagnostic data from a (Q)SFP+ module
4031 *
4032 * SFP A2 QSFP Lower Page
4033 * Temperature 96-97 22-23
4034 * Vcc 98-99 26-27
4035 * TX power 102-103 34-35..40-41
4036 * RX power 104-105 50-51..56-57
4037 */
4038static int
4039ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
4040{
4041 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4042 device_t dev = pf->dev;
4043 struct sbuf *sbuf;
4044 int error = 0;
4045 u8 output;
4046
4047 if (req->oldptr == NULL) {
4048 error = SYSCTL_OUT(req, 0, 128);
4049 		return (error);
4050 }
4051
4052 error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
4053 if (error) {
4054 device_printf(dev, "Error reading from i2c\n");
4055 return (error);
4056 }
4057
4058 /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */
4059 if (output == 0x3) {
4060 /*
4061 * Check for:
4062 * - Internally calibrated data
4063 * - Diagnostic monitoring is implemented
4064 */
4065 pf->read_i2c_byte(pf, 92, 0xA0, &output);
4066 if (!(output & 0x60)) {
4067 device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
4068 return (0);
4069 }
4070
4071 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4072
4073 for (u8 offset = 96; offset < 100; offset++) {
4074 pf->read_i2c_byte(pf, offset, 0xA2, &output);
4075 sbuf_printf(sbuf, "%02X ", output);
4076 }
4077 for (u8 offset = 102; offset < 106; offset++) {
4078 pf->read_i2c_byte(pf, offset, 0xA2, &output);
4079 sbuf_printf(sbuf, "%02X ", output);
4080 }
4081 } else if (output == 0xD || output == 0x11) {
4082 /*
4083 * QSFP+ modules are always internally calibrated, and must indicate
4084 		 * what types of diagnostic monitoring are implemented.
4085 */
4086 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4087
4088 for (u8 offset = 22; offset < 24; offset++) {
4089 pf->read_i2c_byte(pf, offset, 0xA0, &output);
4090 sbuf_printf(sbuf, "%02X ", output);
4091 }
4092 for (u8 offset = 26; offset < 28; offset++) {
4093 pf->read_i2c_byte(pf, offset, 0xA0, &output);
4094 sbuf_printf(sbuf, "%02X ", output);
4095 }
4096 /* Read the data from the first lane */
4097 for (u8 offset = 34; offset < 36; offset++) {
4098 pf->read_i2c_byte(pf, offset, 0xA0, &output);
4099 sbuf_printf(sbuf, "%02X ", output);
4100 }
4101 for (u8 offset = 50; offset < 52; offset++) {
4102 pf->read_i2c_byte(pf, offset, 0xA0, &output);
4103 sbuf_printf(sbuf, "%02X ", output);
4104 }
4105 } else {
4106 device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output);
4107 return (0);
4108 }
4109
4110 sbuf_finish(sbuf);
4111 sbuf_delete(sbuf);
4112
4113 return (0);
4114}
4115
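/*
 * Per the offset table above, the SFP branch emits eight hex bytes:
 * temperature (96-97), Vcc (98-99), TX power (102-103) and RX power
 * (104-105); the QSFP branch reads the analogous lower-page fields
 * and reports TX/RX power for the first lane only.
 */
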
4116/*
4117 * Sysctl to read a byte from I2C bus.
4118 *
4119 * Input: 32-bit value:
4120 * bits 0-7: device address (0xA0 or 0xA2)
4121 * bits 8-15: offset (0-255)
4122 * bits 16-31: unused
4123 * Output: 8-bit value read
4124 */
4125static int
4126ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4127{
4128 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4129 device_t dev = pf->dev;
4130 int input = -1, error = 0;
4131 u8 dev_addr, offset, output;
4132
4133 /* Read in I2C read parameters */
4134 error = sysctl_handle_int(oidp, &input, 0, req);
4135 if ((error) || (req->newptr == NULL))
4136 return (error);
4137 /* Validate device address */
4138 dev_addr = input & 0xFF;
4139 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4140 return (EINVAL);
4141 }
4142 offset = (input >> 8) & 0xFF;
4143
4144 error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
4145 if (error)
4146 return (error);
4147
4148 device_printf(dev, "%02X\n", output);
4149 return (0);
4150}
4151
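/*
 * Example with hypothetical values: to read offset 0x60 of device
 * address 0xA2, write (0x60 << 8) | 0xA2 = 0x60A2 to the sysctl; the
 * byte read back is reported via device_printf() in the system log.
 */
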
4152/*
4153 * Sysctl to write a byte to the I2C bus.
4154 *
4155 * Input: 32-bit value:
4156 * bits 0-7: device address (0xA0 or 0xA2)
4157 * bits 8-15: offset (0-255)
4158 * bits 16-23: value to write
4159 * bits 24-31: unused
4160 * Output: 8-bit value written
4161 */
4162static int
4163ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4164{
4165 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4166 device_t dev = pf->dev;
4167 int input = -1, error = 0;
4168 u8 dev_addr, offset, value;
4169
4170 /* Read in I2C write parameters */
4171 error = sysctl_handle_int(oidp, &input, 0, req);
4172 if ((error) || (req->newptr == NULL))
4173 return (error);
4174 /* Validate device address */
4175 dev_addr = input & 0xFF;
4176 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4177 return (EINVAL);
4178 }
4179 offset = (input >> 8) & 0xFF;
4180 value = (input >> 16) & 0xFF;
4181
4182 error = pf->write_i2c_byte(pf, offset, dev_addr, value);
4183 if (error)
4184 return (error);
4185
4186 device_printf(dev, "%02X written\n", value);
4187 return (0);
4188}
4189
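/*
 * Example with hypothetical values: to write 0x55 at offset 0x7F of
 * device address 0xA2, use (0x55 << 16) | (0x7F << 8) | 0xA2 =
 * 0x557FA2; the value written is echoed via device_printf().
 */
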
4190static int
4191ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4192 u8 bit_pos, int *is_set)
4193{
4194 device_t dev = pf->dev;
4195 struct i40e_hw *hw = &pf->hw;
4196 enum i40e_status_code status;
4197
4198 if (IXL_PF_IN_RECOVERY_MODE(pf))
4199 return (EIO);
4200
4201 status = i40e_aq_get_phy_capabilities(hw,
4202 FALSE, FALSE, abilities, NULL);
4203 if (status) {
4204 device_printf(dev,
4205 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4206 __func__, i40e_stat_str(hw, status),
4207 i40e_aq_str(hw, hw->aq.asq_last_status));
4208 return (EIO);
4209 }
4210
4211 *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
4212 return (0);
4213}
4214
4215static int
4216ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4217 u8 bit_pos, int set)
4218{
4219 device_t dev = pf->dev;
4220 struct i40e_hw *hw = &pf->hw;
4221 struct i40e_aq_set_phy_config config;
4222 enum i40e_status_code status;
4223
4224 /* Set new PHY config */
4225 memset(&config, 0, sizeof(config));
4226 config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
4227 if (set)
4228 config.fec_config |= bit_pos;
4229 if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
4230 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4231 config.phy_type = abilities->phy_type;
4232 config.phy_type_ext = abilities->phy_type_ext;
4233 config.link_speed = abilities->link_speed;
4234 config.eee_capability = abilities->eee_capability;
4235 config.eeer = abilities->eeer_val;
4236 config.low_power_ctrl = abilities->d3_lpan;
4237 status = i40e_aq_set_phy_config(hw, &config, NULL);
4238
4239 if (status) {
4240 device_printf(dev,
4241 "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4242 __func__, i40e_stat_str(hw, status),
4243 i40e_aq_str(hw, hw->aq.asq_last_status));
4244 return (EIO);
4245 }
4246 }
4247
4248 return (0);
4249}
4250
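/*
 * The five FEC sysctls below share one pattern: read the current FEC
 * configuration word via ixl_get_fec_config(), report the bit of
 * interest, and on a write fold the new request into that word,
 * calling i40e_aq_set_phy_config() only when it actually changes.
 */
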
4251static int
4252ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4253{
4254 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4255 int mode, error = 0;
4256
4257 struct i40e_aq_get_phy_abilities_resp abilities;
4258 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
4259 if (error)
4260 return (error);
4261 /* Read in new mode */
4262 error = sysctl_handle_int(oidp, &mode, 0, req);
4263 if ((error) || (req->newptr == NULL))
4264 return (error);
4265
4266 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
4267}
4268
4269static int
4270ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4271{
4272 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4273 int mode, error = 0;
4274
4275 struct i40e_aq_get_phy_abilities_resp abilities;
4276 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
4277 if (error)
4278 return (error);
4279 /* Read in new mode */
4280 error = sysctl_handle_int(oidp, &mode, 0, req);
4281 if ((error) || (req->newptr == NULL))
4282 return (error);
4283
4284 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
4285}
4286
4287static int
4288ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4289{
4290 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4291 int mode, error = 0;
4292
4293 struct i40e_aq_get_phy_abilities_resp abilities;
4294 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
4295 if (error)
4296 return (error);
4297 /* Read in new mode */
4298 error = sysctl_handle_int(oidp, &mode, 0, req);
4299 if ((error) || (req->newptr == NULL))
4300 return (error);
4301
4302 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
4303}
4304
4305static int
4306ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4307{
4308 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4309 int mode, error = 0;
4310
4311 struct i40e_aq_get_phy_abilities_resp abilities;
4312 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
4313 if (error)
4314 return (error);
4315 /* Read in new mode */
4316 error = sysctl_handle_int(oidp, &mode, 0, req);
4317 if ((error) || (req->newptr == NULL))
4318 return (error);
4319
4320 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
4321}
4322
4323static int
4324ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4325{
4326 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4327 int mode, error = 0;
4328
4329 struct i40e_aq_get_phy_abilities_resp abilities;
4330 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4331 if (error)
4332 return (error);
4333 /* Read in new mode */
4334 error = sysctl_handle_int(oidp, &mode, 0, req);
4335 if ((error) || (req->newptr == NULL))
4336 return (error);
4337
4338 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
4339}
4340
4341static int
4342ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4343{
4344 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4345 struct i40e_hw *hw = &pf->hw;
4346 device_t dev = pf->dev;
4347 struct sbuf *buf;
4348 int error = 0;
4349 enum i40e_status_code status;
4350
4351 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4352 if (!buf) {
4353 device_printf(dev, "Could not allocate sbuf for output.\n");
4354 return (ENOMEM);
4355 }
4356
4357 u8 *final_buff;
4358 /* This amount is only necessary if reading the entire cluster into memory */
4359#define IXL_FINAL_BUFF_SIZE (1280 * 1024)
4360 final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_IXL, M_NOWAIT);
4361 	if (final_buff == NULL) {
4362 		device_printf(dev, "Could not allocate memory for output.\n");
4363 		sbuf_delete(buf);
4364 		return (ENOMEM);
	}
4365 int final_buff_len = 0;
4366
4367 u8 cluster_id = 1;
4368 bool more = true;
4369
4370 u8 dump_buf[4096];
4371 u16 curr_buff_size = 4096;
4372 u8 curr_next_table = 0;
4373 u32 curr_next_index = 0;
4374
4375 u16 ret_buff_size;
4376 u8 ret_next_table;
4377 u32 ret_next_index;
4378
4379 sbuf_cat(buf, "\n");
4380
4381 while (more) {
4382 status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4383 dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4384 if (status) {
4385 device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4386 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4387 goto free_out;
4388 }
4389
4390 /* copy info out of temp buffer */
4391 bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4392 final_buff_len += ret_buff_size;
4393
4394 if (ret_next_table != curr_next_table) {
4395 /* We're done with the current table; we can dump out read data. */
4396 sbuf_printf(buf, "%d:", curr_next_table);
4397 int bytes_printed = 0;
4398 			while (bytes_printed < final_buff_len) {
4399 sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
4400 bytes_printed += 16;
4401 }
4402 sbuf_cat(buf, "\n");
4403
4404 /* The entire cluster has been read; we're finished */
4405 if (ret_next_table == 0xFF)
4406 break;
4407
4408 /* Otherwise clear the output buffer and continue reading */
4409 bzero(final_buff, IXL_FINAL_BUFF_SIZE);
4410 final_buff_len = 0;
4411 }
4412
4413 if (ret_next_index == 0xFFFFFFFF)
4414 ret_next_index = 0;
4415
4416 bzero(dump_buf, sizeof(dump_buf));
4417 curr_next_table = ret_next_table;
4418 curr_next_index = ret_next_index;
4419 }
4420
4421free_out:
4422 free(final_buff, M_IXL);
4424 error = sbuf_finish(buf);
4425 if (error)
4426 device_printf(dev, "Error finishing sbuf: %d\n", error);
4427 sbuf_delete(buf);
4428
4429 return (error);
4430}
4431
4432static int
4433ixl_start_fw_lldp(struct ixl_pf *pf)
4434{
4435 struct i40e_hw *hw = &pf->hw;
4436 enum i40e_status_code status;
4437
4438 status = i40e_aq_start_lldp(hw, false, NULL);
4439 if (status != I40E_SUCCESS) {
4440 switch (hw->aq.asq_last_status) {
4441 case I40E_AQ_RC_EEXIST:
4442 device_printf(pf->dev,
4443 "FW LLDP agent is already running\n");
4444 break;
4445 case I40E_AQ_RC_EPERM:
4446 device_printf(pf->dev,
4447 "Device configuration forbids SW from starting "
4448 "the LLDP agent. Set the \"LLDP Agent\" UEFI HII "
4449 "attribute to \"Enabled\" to use this sysctl\n");
4450 return (EINVAL);
4451 default:
4452 device_printf(pf->dev,
4453 "Starting FW LLDP agent failed: error: %s, %s\n",
4454 i40e_stat_str(hw, status),
4455 i40e_aq_str(hw, hw->aq.asq_last_status));
4456 return (EINVAL);
4457 }
4458 }
4459
4460 atomic_clear_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4461 return (0);
4462}
4463
4464static int
4465ixl_stop_fw_lldp(struct ixl_pf *pf)
4466{
4467 struct i40e_hw *hw = &pf->hw;
4468 device_t dev = pf->dev;
4469 enum i40e_status_code status;
4470
4471 if (hw->func_caps.npar_enable != 0) {
4472 device_printf(dev,
4473 "Disabling FW LLDP agent is not supported on this device\n");
4474 return (EINVAL);
4475 }
4476
4477 if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) {
4478 device_printf(dev,
4479 "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
4480 return (EINVAL);
4481 }
4482
4483 status = i40e_aq_stop_lldp(hw, true, false, NULL);
4484 if (status != I40E_SUCCESS) {
4485 if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) {
4486 device_printf(dev,
4487 "Disabling FW LLDP agent failed: error: %s, %s\n",
4488 i40e_stat_str(hw, status),
4489 i40e_aq_str(hw, hw->aq.asq_last_status));
4490 return (EINVAL);
4491 }
4492
4493 device_printf(dev, "FW LLDP agent is already stopped\n");
4494 }
4495
4496 i40e_aq_set_dcb_parameters(hw, true, NULL);
4497 atomic_set_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4498 return (0);
4499}
4500
4501static int
4502ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4503{
4504 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4505 int state, new_state, error = 0;
4506
4507 state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
4508
4509 /* Read in new mode */
4510 error = sysctl_handle_int(oidp, &new_state, 0, req);
4511 if ((error) || (req->newptr == NULL))
4512 return (error);
4513
4514 /* Already in requested state */
4515 if (new_state == state)
4516 return (error);
4517
4518 if (new_state == 0)
4519 return ixl_stop_fw_lldp(pf);
4520
4521 return ixl_start_fw_lldp(pf);
4522}
4523
4524static int
4525ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4526{
4527 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4528 int state, new_state;
4529 int sysctl_handle_status = 0;
4530 enum i40e_status_code cmd_status;
4531
4532 /* Init states' values */
4533 state = new_state = (!!(pf->state & IXL_PF_STATE_EEE_ENABLED));
4534
4535 /* Get requested mode */
4536 sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req);
4537 if ((sysctl_handle_status) || (req->newptr == NULL))
4538 return (sysctl_handle_status);
4539
4540 /* Check if state has changed */
4541 if (new_state == state)
4542 return (0);
4543
4544 /* Set new state */
4545 cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state));
4546
4547 /* Save new state or report error */
4548 if (!cmd_status) {
4549 if (new_state == 0)
4550 atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
4551 else
4552 atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
4553 } else if (cmd_status == I40E_ERR_CONFIG)
4554 return (EPERM);
4555 else
4556 return (EIO);
4557
4558 return (0);
4559}
4560
4561static int
4562ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS)
4563{
4564 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4565 int error, state;
4566
4567 state = !!(atomic_load_acq_32(&pf->state) &
4568 IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);
4569
4570 error = sysctl_handle_int(oidp, &state, 0, req);
4571 if ((error) || (req->newptr == NULL))
4572 return (error);
4573
4574 if (state == 0)
4575 atomic_clear_32(&pf->state, IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);
4576 else
4577 atomic_set_32(&pf->state, IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);
4578
4579 return (0);
4580}
4581
4582
4583int
4584ixl_attach_get_link_status(struct ixl_pf *pf)
4585{
4586 struct i40e_hw *hw = &pf->hw;
4587 device_t dev = pf->dev;
4588 int error = 0;
4589
4590 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4591 (hw->aq.fw_maj_ver < 4)) {
4592 i40e_msec_delay(75);
4593 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4594 if (error) {
4595 device_printf(dev, "link restart failed, aq_err=%d\n",
4596 pf->hw.aq.asq_last_status);
4597 			return (error);
4598 }
4599 }
4600
4601 /* Determine link state */
4602 hw->phy.get_link_info = TRUE;
4603 i40e_get_link_status(hw, &pf->link_up);
4604
4605 /* Flow Control mode not set by user, read current FW settings */
4606 if (pf->fc == -1)
4607 pf->fc = hw->fc.current_mode;
4608
4609 return (0);
4610}
4611
4612static int
4613ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4614{
4615 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4616 int requested = 0, error = 0;
4617
4618 /* Read in new mode */
4619 error = sysctl_handle_int(oidp, &requested, 0, req);
4620 if ((error) || (req->newptr == NULL))
4621 return (error);
4622
4623 /* Initiate the PF reset later in the admin task */
4624 atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
4625
4626 return (error);
4627}
4628
4629static int
4630ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4631{
4632 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4633 struct i40e_hw *hw = &pf->hw;
4634 int requested = 0, error = 0;
4635
4636 /* Read in new mode */
4637 error = sysctl_handle_int(oidp, &requested, 0, req);
4638 if ((error) || (req->newptr == NULL))
4639 return (error);
4640
4641 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
4642
4643 return (error);
4644}
4645
4646static int
4647ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4648{
4649 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4650 struct i40e_hw *hw = &pf->hw;
4651 int requested = 0, error = 0;
4652
4653 /* Read in new mode */
4654 error = sysctl_handle_int(oidp, &requested, 0, req);
4655 if ((error) || (req->newptr == NULL))
4656 return (error);
4657
4658 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
4659
4660 return (error);
4661}
4662
4663/*
4664  * Print out the mapping of TX and RX queue indexes
4665  * to MSI-X vectors.
4666 */
4667static int
4668ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
4669{
4670 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4671 struct ixl_vsi *vsi = &pf->vsi;
4672 device_t dev = pf->dev;
4673 struct sbuf *buf;
4674 int error = 0;
4675
4676 struct ixl_rx_queue *rx_que = vsi->rx_queues;
4677 struct ixl_tx_queue *tx_que = vsi->tx_queues;
4678
4679 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4680 if (!buf) {
4681 device_printf(dev, "Could not allocate sbuf for output.\n");
4682 return (ENOMEM);
4683 }
4684
4685 sbuf_cat(buf, "\n");
4686 for (int i = 0; i < vsi->num_rx_queues; i++) {
4687 rx_que = &vsi->rx_queues[i];
4688 sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
4689 }
4690 for (int i = 0; i < vsi->num_tx_queues; i++) {
4691 tx_que = &vsi->tx_queues[i];
4692 sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
4693 }
4694
4695 error = sbuf_finish(buf);
4696 if (error)
4697 device_printf(dev, "Error finishing sbuf: %d\n", error);
4698 sbuf_delete(buf);
4699
4700 return (error);
4701}