/* SPDX-License-Identifier: BSD-3-Clause */
/*  Copyright (c) 2022, Intel Corporation
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 *   3. Neither the name of the Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived from
 *      this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"

#include "ice_flow.h"
#include "ice_switch.h"

#define ICE_PF_RESET_WAIT_COUNT	300

static const char * const ice_link_mode_str_low[] = {
	[0] = "100BASE_TX",
	[1] = "100M_SGMII",
	[2] = "1000BASE_T",
	[3] = "1000BASE_SX",
	[4] = "1000BASE_LX",
	[5] = "1000BASE_KX",
	[6] = "1G_SGMII",
	[7] = "2500BASE_T",
	[8] = "2500BASE_X",
	[9] = "2500BASE_KX",
	[10] = "5GBASE_T",
	[11] = "5GBASE_KR",
	[12] = "10GBASE_T",
	[13] = "10G_SFI_DA",
	[14] = "10GBASE_SR",
	[15] = "10GBASE_LR",
	[16] = "10GBASE_KR_CR1",
	[17] = "10G_SFI_AOC_ACC",
	[18] = "10G_SFI_C2C",
	[19] = "25GBASE_T",
	[20] = "25GBASE_CR",
	[21] = "25GBASE_CR_S",
	[22] = "25GBASE_CR1",
	[23] = "25GBASE_SR",
	[24] = "25GBASE_LR",
	[25] = "25GBASE_KR",
	[26] = "25GBASE_KR_S",
	[27] = "25GBASE_KR1",
	[28] = "25G_AUI_AOC_ACC",
	[29] = "25G_AUI_C2C",
	[30] = "40GBASE_CR4",
	[31] = "40GBASE_SR4",
	[32] = "40GBASE_LR4",
	[33] = "40GBASE_KR4",
	[34] = "40G_XLAUI_AOC_ACC",
	[35] = "40G_XLAUI",
	[36] = "50GBASE_CR2",
	[37] = "50GBASE_SR2",
	[38] = "50GBASE_LR2",
	[39] = "50GBASE_KR2",
	[40] = "50G_LAUI2_AOC_ACC",
	[41] = "50G_LAUI2",
	[42] = "50G_AUI2_AOC_ACC",
	[43] = "50G_AUI2",
	[44] = "50GBASE_CP",
	[45] = "50GBASE_SR",
	[46] = "50GBASE_FR",
	[47] = "50GBASE_LR",
	[48] = "50GBASE_KR_PAM4",
	[49] = "50G_AUI1_AOC_ACC",
	[50] = "50G_AUI1",
	[51] = "100GBASE_CR4",
	[52] = "100GBASE_SR4",
	[53] = "100GBASE_LR4",
	[54] = "100GBASE_KR4",
	[55] = "100G_CAUI4_AOC_ACC",
	[56] = "100G_CAUI4",
	[57] = "100G_AUI4_AOC_ACC",
	[58] = "100G_AUI4",
	[59] = "100GBASE_CR_PAM4",
	[60] = "100GBASE_KR_PAM4",
	[61] = "100GBASE_CP2",
	[62] = "100GBASE_SR2",
	[63] = "100GBASE_DR",
};

static const char * const ice_link_mode_str_high[] = {
	[0] = "100GBASE_KR2_PAM4",
	[1] = "100G_CAUI2_AOC_ACC",
	[2] = "100G_CAUI2",
	[3] = "100G_AUI2_AOC_ACC",
	[4] = "100G_AUI2",
};

/**
 * ice_dump_phy_type - helper function to dump phy_type
 * @hw: pointer to the HW structure
 * @low: 64 bit value for phy_type_low
 * @high: 64 bit value for phy_type_high
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
{
	u32 i;

	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix,
		  (unsigned long long)low);

	for (i = 0; i < ARRAY_SIZE(ice_link_mode_str_low); i++) {
		if (low & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s:   bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_low[i]);
	}

	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix,
		  (unsigned long long)high);

	for (i = 0; i < ARRAY_SIZE(ice_link_mode_str_high); i++) {
		if (high & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s:   bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_high[i]);
	}
}

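/*
 * Example (illustrative only): a phy_type_low value of 0x1000 has only
 * bit 12 set, so a dump with prefix "phy_caps_media" would produce:
 *
 *	phy_caps_media: phy_type_low: 0x0000000000001000
 *	phy_caps_media:   bit(12): 10GBASE_T
 */
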
/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return ICE_SUCCESS;
}

/**
 * ice_is_e810
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_E810;
}

/**
 * ice_is_e810t
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E810T based, false if not.
 */
bool ice_is_e810t(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_SFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T:
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T4:
		case ICE_SUBDEV_ID_E810T5:
		case ICE_SUBDEV_ID_E810T7:
			return true;
		}
		break;
	case ICE_DEV_ID_E810C_QSFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T5:
		case ICE_SUBDEV_ID_E810T6:
			return true;
		}
		break;
	default:
		break;
	}

	return false;
}

/**
 * ice_is_e823
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E823-L or E823-C based, false if not.
 */
bool ice_is_e823(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_SGMII:
		return true;
	default:
		return false;
	}
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user specified buffer, which should be interpreted as a
 * "manage_mac_read" response.
 * MAC addresses from the response are also stored in the HW struct
 * (port.mac). ice_discover_dev_caps is expected to be called before this
 * function.
 */
enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ice_memcpy(hw->port_info->mac.lan_addr,
				   resp[i].mac_addr, ETH_ALEN,
				   ICE_NONDMA_TO_NONDMA);
			ice_memcpy(hw->port_info->mac.perm_addr,
				   resp[i].mac_addr,
				   ETH_ALEN, ICE_NONDMA_TO_NONDMA);
			break;
		}
	return ICE_SUCCESS;
}

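/*
 * Example caller (a minimal sketch; a stack buffer is used here for
 * brevity, while ice_init_hw() below allocates it with ice_calloc):
 *
 *	struct ice_aqc_manage_mac_read_resp buf[2];
 *	enum ice_status status;
 *
 *	status = ice_aq_manage_mac_read(hw, buf, sizeof(buf), NULL);
 *	if (status == ICE_SUCCESS)
 *		... hw->port_info->mac.lan_addr now holds the LAN address ...
 */
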
/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;
	const char *prefix;
	struct ice_hw *hw;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= CPU_TO_LE16(report_mode);

	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");

	switch (report_mode) {
	case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
		prefix = "phy_caps_media";
		break;
	case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
		prefix = "phy_caps_no_media";
		break;
	case ICE_AQC_REPORT_ACTIVE_CFG:
		prefix = "phy_caps_active";
		break;
	case ICE_AQC_REPORT_DFLT_CFG:
		prefix = "phy_caps_default";
		break;
	default:
		prefix = "phy_caps_invalid";
	}

	ice_dump_phy_type(hw, LE64_TO_CPU(pcaps->phy_type_low),
			  LE64_TO_CPU(pcaps->phy_type_high), prefix);

	ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
		  prefix, report_mode);
	ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
		  pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
		  prefix, pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
		  prefix, pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
		  pcaps->module_type[2]);

	if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
		pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
		ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
			   sizeof(pi->phy.link_info.module_type),
			   ICE_NONDMA_TO_NONDMA);
	}

	return status;
}

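/*
 * Example caller (a minimal sketch): query the capabilities of the media
 * currently attached to the port. With ICE_AQC_REPORT_TOPO_CAP_MEDIA the
 * function also caches phy_type_low/high in pi->phy as a side effect.
 *
 *	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
 *	enum ice_status status;
 *
 *	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
 *				     &pcaps, NULL);
 *	if (status == ICE_SUCCESS)
 *		... pcaps.caps, pcaps.link_fec_options, etc. are valid ...
 */
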
/**
 * ice_aq_get_netlist_node
 * @hw: pointer to the hw struct
 * @cmd: get_link_topo AQ structure
 * @node_part_number: output node part number if node found
 * @node_handle: output node handle parameter if node found
 */
enum ice_status
ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
			u8 *node_part_number, u16 *node_handle)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
	desc.params.get_link_topo = *cmd;

	if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
		return ICE_ERR_NOT_SUPPORTED;

	if (node_handle)
		*node_handle =
			LE16_TO_CPU(desc.params.get_link_topo.addr.handle);
	if (node_part_number)
		*node_part_number = desc.params.get_link_topo.node_part_num;

	return ICE_SUCCESS;
}

#define MAX_NETLIST_SIZE 10
/**
 * ice_find_netlist_node
 * @hw: pointer to the hw struct
 * @node_type_ctx: type of netlist node to look for
 * @node_part_number: node part number to look for
 * @node_handle: output parameter if node found - optional
 *
 * Find and return the node handle for a given node type and part number in the
 * netlist. Returns ICE_SUCCESS if the node is found, ICE_ERR_DOES_NOT_EXIST
 * otherwise. If node_handle is provided, it is set to the found node's handle.
 */
enum ice_status
ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
		      u16 *node_handle)
{
	struct ice_aqc_get_link_topo cmd;
	u8 rec_node_part_number;
	u16 rec_node_handle;
	u8 idx;

	for (idx = 0; idx < MAX_NETLIST_SIZE; idx++) {
		enum ice_status status;

		memset(&cmd, 0, sizeof(cmd));

		cmd.addr.topo_params.node_type_ctx =
			(node_type_ctx << ICE_AQC_LINK_TOPO_NODE_TYPE_S);
		cmd.addr.topo_params.index = idx;

		status = ice_aq_get_netlist_node(hw, &cmd,
						 &rec_node_part_number,
						 &rec_node_handle);
		if (status)
			return status;

		if (rec_node_part_number == node_part_number) {
			if (node_handle)
				*node_handle = rec_node_handle;
			return ICE_SUCCESS;
		}
	}

	return ICE_ERR_DOES_NOT_EXIST;
}

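/*
 * Example caller (a sketch; NODE_TYPE and PART_NUMBER stand in for real
 * ICE_AQC_LINK_TOPO_* values and are not defined in this file):
 *
 *	u16 handle;
 *
 *	if (ice_find_netlist_node(hw, NODE_TYPE, PART_NUMBER, &handle) ==
 *	    ICE_SUCCESS)
 *		... a matching node exists and 'handle' identifies it ...
 */
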
/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.topo_params.node_type_ctx =
		(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
		 ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.topo_params.node_type_ctx |=
		(ICE_AQC_LINK_TOPO_NODE_TYPE_M &
		 ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE);

	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present then
	 * connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_netlist_node(pi->hw, cmd, NULL, NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		    hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_AUI;
			/* fall-through */
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_AUI;
			/* fall-through */
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}

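/*
 * For example, a link that reports only ICE_PHY_TYPE_LOW_10GBASE_T resolves
 * to ICE_MEDIA_BASET above, while ICE_PHY_TYPE_LOW_10G_SFI_DA resolves to
 * ICE_MEDIA_DA.
 */
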
#define ice_get_link_status_datalen(hw)	ICE_GET_LINK_STATUS_DATALEN_V1

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = CPU_TO_LE16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data,
				 ice_get_link_status_datalen(hw), cd);
	if (status != ICE_SUCCESS)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = LE16_TO_CPU(link_data.link_speed);
	li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
	li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "	link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return ICE_SUCCESS;
}

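/*
 * Example caller (a minimal sketch): refresh the cached link status and
 * enable Link Status Event reporting at the same time.
 *
 *	struct ice_link_status link;
 *	enum ice_status status;
 *
 *	status = ice_aq_get_link_info(pi, true, &link, NULL);
 *	if (status == ICE_SUCCESS && (link.link_info & ICE_AQ_LINK_UP))
 *		... link.link_speed holds the negotiated speed ...
 */
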
/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* We read back the transmit timer and fc threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and fc
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);

	/* Retrieve the fc threshold */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @auto_drop: Tell HW to drop packets if TC queue is blocked
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
enum ice_status
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, bool auto_drop,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = CPU_TO_LE16(max_frame_size);

	if (ice_is_fw_auto_drop_supported(hw) && auto_drop)
		cmd->drop_opts |= ICE_AQ_SET_MAC_AUTO_DROP_BLOCKING_PKTS;
	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

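/*
 * Example caller (as done in ice_init_hw() below): enable jumbo frames at
 * the MAC level, without requesting automatic drop of blocked packets.
 *
 *	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, false,
 *				    NULL);
 */
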
/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	enum ice_status status;

	hw->switch_info = (struct ice_switch_info *)
			  ice_malloc(hw, sizeof(*hw->switch_info));

	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
	if (status) {
		ice_free(hw, hw->switch_info);
		return status;
	}
	return ICE_SUCCESS;
}

/**
 * ice_cleanup_fltr_mgmt_single - clears a single filter management struct
 * @hw: pointer to the HW struct
 * @sw: pointer to switch info struct for which function clears filters
 */
static void
ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
{
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	if (!sw)
		return;

	LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 ice_vsi_list_map_info, list_entry) {
		LIST_DEL(&v_pos_map->list_entry);
		ice_free(hw, v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
					 &recps[i].rg_list, ice_recp_grp_entry,
					 l_entry) {
			LIST_DEL(&rg_entry->l_entry);
			ice_free(hw, rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_adv_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr->lkups);
				ice_free(hw, lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr);
			}
		}
		if (recps[i].root_buf)
			ice_free(hw, recps[i].root_buf);
	}
	ice_rm_sw_replay_rule_info(hw, sw);
	ice_free(hw, sw->recp_list);
	ice_free(hw, sw);
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_print_rollback_msg - print FW rollback message
 * @hw: pointer to the hardware structure
 */
void ice_print_rollback_msg(struct ice_hw *hw)
{
	char nvm_str[ICE_NVM_VER_LEN] = { 0 };
	struct ice_orom_info *orom;
	struct ice_nvm_info *nvm;

	orom = &hw->flash.orom;
	nvm = &hw->flash.nvm;

	SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
		 nvm->major, nvm->minor, nvm->eetrack, orom->major,
		 orom->build, orom->patch);
	ice_warn(hw,
		 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
		 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
}

/**
 * ice_set_umac_shared
 * @hw: pointer to the hw struct
 *
 * Set boolean flag to allow unicast MAC sharing
 */
void ice_set_umac_shared(struct ice_hw *hw)
{
	hw->umac_shared = true;
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
		PF_FUNC_RID_FUNCTION_NUMBER_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;
	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_fwlog_set_support_ena(hw);
	status = ice_fwlog_set(hw, &hw->fwlog_cfg);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging, status %d.\n",
			  status);
	} else {
		if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_REGISTER_ON_INIT) {
			status = ice_fwlog_register(hw);
			if (status)
				ice_debug(hw, ICE_DBG_INIT, "Failed to register for FW logging events, status %d.\n",
					  status);
		} else {
			status = ice_fwlog_unregister(hw);
			if (status)
				ice_debug(hw, ICE_DBG_INIT, "Failed to unregister for FW logging events, status %d.\n",
					  status);
		}
	}

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
		ice_print_rollback_msg(hw);

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_clear_pxe_mode(hw);

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = (struct ice_port_info *)
			ice_malloc(hw, sizeof(*hw->port_info));
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;
	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;
	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL);
	ice_free(hw, pcaps);
	if (status)
		ice_warn(hw, "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;
	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */

	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = ice_calloc(hw, 2,
			     sizeof(struct ice_aqc_manage_mac_read_resp));
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	ice_free(hw, mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;

	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, false,
				    NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;

	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	ice_init_lock(&hw->tnl_lock);

	return ICE_SUCCESS;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	ice_free(hw, hw->port_info);
	hw->port_info = NULL;
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	ice_destroy_lock(&hw->tnl_lock);

	if (hw->port_info) {
		ice_free(hw, hw->port_info);
		hw->port_info = NULL;
	}

	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
			GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		ice_msec_delay(100, true);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.iwarp ?
					  GLNVM_ULD_PE_DONE_M : 0);

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		ice_msec_delay(10, true);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return ICE_SUCCESS;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
		ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		ice_msec_delay(1, true);
	}

	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

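/*
 * Example caller (a minimal sketch): request a PF reset and propagate any
 * failure, as ice_init_hw() does above.
 *
 *	status = ice_reset(hw, ICE_RESET_PFR);
 *	if (status)
 *		return status;
 */
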
/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}

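/*
 * Example caller (a sketch; the ring address, sizes, and queue index are
 * placeholder values, and the 128-byte units for base/dbuf are an
 * assumption from the driver's context layout): program Rx queue 0 with a
 * 512-descriptor ring.
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma_addr >> 7;	(base is in 128-byte units)
 *	rlan_ctx.qlen = 512;
 *	rlan_ctx.dbuf = 2048 >> 7;		(data buffer, 128-byte units)
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, 0);
 */
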
1432 /**
1433  * ice_clear_rxq_ctx
1434  * @hw: pointer to the hardware structure
1435  * @rxq_index: the index of the Rx queue to clear
1436  *
1437  * Clears rxq context in HW register space
1438  */
1439 enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
1440 {
1441 	u8 i;
1442 
1443 	if (rxq_index > QRX_CTRL_MAX_INDEX)
1444 		return ICE_ERR_PARAM;
1445 
1446 	/* Clear each dword register separately */
1447 	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
1448 		wr32(hw, QRX_CONTEXT(i, rxq_index), 0);
1449 
1450 	return ICE_SUCCESS;
1451 }
1452 
1453 /* LAN Tx Queue Context */
1454 const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1455 				    /* Field			Width	LSB */
1456 	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
1457 	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
1458 	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
1459 	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
1460 	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
1461 	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
1462 	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
1463 	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
1464 	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
1465 	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
1466 	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
1467 	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
1468 	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
1469 	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
1470 	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
1471 	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
1472 	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
1473 	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
1474 	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
1475 	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
1476 	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
1477 	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
1478 	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
1479 	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
1480 	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
1481 	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
1482 	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
1483 	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
1484 	{ 0 }
1485 };
1486 
1487 /**
1488  * ice_copy_tx_cmpltnq_ctx_to_hw
1489  * @hw: pointer to the hardware structure
1490  * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
1491  * @tx_cmpltnq_index: the index of the completion queue
1492  *
1493  * Copies Tx completion queue context from dense structure to HW register space
1494  */
1495 static enum ice_status
1496 ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
1497 			      u32 tx_cmpltnq_index)
1498 {
1499 	u8 i;
1500 
1501 	if (!ice_tx_cmpltnq_ctx)
1502 		return ICE_ERR_BAD_PTR;
1503 
1504 	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1505 		return ICE_ERR_PARAM;
1506 
1507 	/* Copy each dword separately to HW */
1508 	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
1509 		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
1510 		     *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1511 
1512 		ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
1513 			  *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1514 	}
1515 
1516 	return ICE_SUCCESS;
1517 }
1518 
1519 /* LAN Tx Completion Queue Context */
1520 static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
1521 				       /* Field			Width   LSB */
1522 	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base,			57,	0),
1523 	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len,		18,	64),
1524 	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation,		1,	96),
1525 	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr,		22,	97),
1526 	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num,		3,	128),
1527 	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num,		10,	131),
1528 	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type,		2,	141),
1529 	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr,		1,	160),
1530 	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid,		8,	161),
1531 	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache,		512,	192),
1532 	{ 0 }
1533 };
1534 
1535 /**
1536  * ice_write_tx_cmpltnq_ctx
1537  * @hw: pointer to the hardware structure
1538  * @tx_cmpltnq_ctx: pointer to the completion queue context
1539  * @tx_cmpltnq_index: the index of the completion queue
1540  *
1541  * Converts completion queue context from sparse to dense structure and then
1542  * writes it to HW register space
1543  */
1544 enum ice_status
1545 ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
1546 			 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
1547 			 u32 tx_cmpltnq_index)
1548 {
1549 	u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1550 
1551 	ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
1552 	return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
1553 }
1554 
1555 /**
1556  * ice_clear_tx_cmpltnq_ctx
1557  * @hw: pointer to the hardware structure
1558  * @tx_cmpltnq_index: the index of the completion queue to clear
1559  *
1560  * Clears Tx completion queue context in HW register space
1561  */
1562 enum ice_status
1563 ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
1564 {
1565 	u8 i;
1566 
1567 	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1568 		return ICE_ERR_PARAM;
1569 
1570 	/* Clear each dword register separately */
1571 	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
1572 		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);
1573 
1574 	return ICE_SUCCESS;
1575 }
1576 
1577 /**
1578  * ice_copy_tx_drbell_q_ctx_to_hw
1579  * @hw: pointer to the hardware structure
1580  * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
1581  * @tx_drbell_q_index: the index of the doorbell queue
1582  *
1583  * Copies doorbell queue context from dense structure to HW register space
1584  */
1585 static enum ice_status
1586 ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
1587 			       u32 tx_drbell_q_index)
1588 {
1589 	u8 i;
1590 
1591 	if (!ice_tx_drbell_q_ctx)
1592 		return ICE_ERR_BAD_PTR;
1593 
1594 	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1595 		return ICE_ERR_PARAM;
1596 
1597 	/* Copy each dword separately to HW */
1598 	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
1599 		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
1600 		     *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1601 
1602 		ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
1603 			  *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1604 	}
1605 
1606 	return ICE_SUCCESS;
1607 }
1608 
1609 /* LAN Tx Doorbell Queue Context info */
1610 static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
1611 					/* Field		Width   LSB */
1612 	ICE_CTX_STORE(ice_tx_drbell_q_ctx, base,		57,	0),
1613 	ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len,		13,	64),
1614 	ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num,		3,	80),
1615 	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num,		8,	84),
1616 	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type,		2,	94),
1617 	ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid,		8,	96),
1618 	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd,		1,	104),
1619 	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr,		1,	108),
1620 	ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en,		1,	112),
1621 	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head,		13,	128),
1622 	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail,		13,	144),
1623 	{ 0 }
1624 };
1625 
1626 /**
1627  * ice_write_tx_drbell_q_ctx
1628  * @hw: pointer to the hardware structure
1629  * @tx_drbell_q_ctx: pointer to the doorbell queue context
1630  * @tx_drbell_q_index: the index of the doorbell queue
1631  *
1632  * Converts doorbell queue context from sparse to dense structure and then
1633  * writes it to HW register space
1634  */
1635 enum ice_status
1636 ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
1637 			  struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
1638 			  u32 tx_drbell_q_index)
1639 {
1640 	u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1641 
1642 	ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf,
1643 		    ice_tx_drbell_q_ctx_info);
1644 	return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
1645 }
1646 
1647 /**
1648  * ice_clear_tx_drbell_q_ctx
1649  * @hw: pointer to the hardware structure
1650  * @tx_drbell_q_index: the index of the doorbell queue to clear
1651  *
1652  * Clears doorbell queue context in HW register space
1653  */
1654 enum ice_status
1655 ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
1656 {
1657 	u8 i;
1658 
1659 	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1660 		return ICE_ERR_PARAM;
1661 
1662 	/* Clear each dword register separately */
1663 	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
1664 		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);
1665 
1666 	return ICE_SUCCESS;
1667 }
1668 
1669 /* FW Admin Queue command wrappers */
1670 
1671 /**
1672  * ice_should_retry_sq_send_cmd
1673  * @opcode: AQ opcode
1674  *
1675  * Decide if we should retry the send command routine for the ATQ, depending
1676  * on the opcode.
1677  */
1678 static bool ice_should_retry_sq_send_cmd(u16 opcode)
1679 {
1680 	switch (opcode) {
1681 	case ice_aqc_opc_dnl_get_status:
1682 	case ice_aqc_opc_dnl_run:
1683 	case ice_aqc_opc_dnl_call:
1684 	case ice_aqc_opc_dnl_read_sto:
1685 	case ice_aqc_opc_dnl_write_sto:
1686 	case ice_aqc_opc_dnl_set_breakpoints:
1687 	case ice_aqc_opc_dnl_read_log:
1688 	case ice_aqc_opc_get_link_topo:
1689 	case ice_aqc_opc_done_alt_write:
1690 	case ice_aqc_opc_lldp_stop:
1691 	case ice_aqc_opc_lldp_start:
1692 	case ice_aqc_opc_lldp_filter_ctrl:
1693 		return true;
1694 	}
1695 
1696 	return false;
1697 }
1698 
1699 /**
1700  * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
1701  * @hw: pointer to the HW struct
1702  * @cq: pointer to the specific Control queue
1703  * @desc: prefilled descriptor describing the command
1704  * @buf: buffer to use for indirect commands (or NULL for direct commands)
1705  * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
1706  * @cd: pointer to command details structure
1707  *
1708  * Retry sending a FW Admin Queue command to the FW Admin Queue if the
1709  * EBUSY AQ error is returned, up to ICE_SQ_SEND_MAX_EXECUTE attempts.
1710  */
1711 static enum ice_status
1712 ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1713 		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
1714 		      struct ice_sq_cd *cd)
1715 {
1716 	struct ice_aq_desc desc_cpy;
1717 	enum ice_status status;
1718 	bool is_cmd_for_retry;
1719 	u8 *buf_cpy = NULL;
1720 	u8 idx = 0;
1721 	u16 opcode;
1722 
1723 	opcode = LE16_TO_CPU(desc->opcode);
1724 	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
1725 	ice_memset(&desc_cpy, 0, sizeof(desc_cpy), ICE_NONDMA_MEM);
1726 
1727 	if (is_cmd_for_retry) {
1728 		if (buf) {
1729 			buf_cpy = (u8 *)ice_malloc(hw, buf_size);
1730 			if (!buf_cpy)
1731 				return ICE_ERR_NO_MEMORY;
1732 		}
1733 
1734 		ice_memcpy(&desc_cpy, desc, sizeof(desc_cpy),
1735 			   ICE_NONDMA_TO_NONDMA);
1736 	}
1737 
1738 	do {
1739 		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
1740 
1741 		if (!is_cmd_for_retry || status == ICE_SUCCESS ||
1742 		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
1743 			break;
1744 
1745 		if (buf_cpy)
1746 			ice_memcpy(buf, buf_cpy, buf_size,
1747 				   ICE_NONDMA_TO_NONDMA);
1748 
1749 		ice_memcpy(desc, &desc_cpy, sizeof(desc_cpy),
1750 			   ICE_NONDMA_TO_NONDMA);
1751 
1752 		ice_msec_delay(ICE_SQ_SEND_DELAY_TIME_MS, false);
1753 
1754 	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
1755 
1756 	if (buf_cpy)
1757 		ice_free(hw, buf_cpy);
1758 
1759 	return status;
1760 }
1761 
1762 /**
1763  * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1764  * @hw: pointer to the HW struct
1765  * @desc: descriptor describing the command
1766  * @buf: buffer to use for indirect commands (NULL for direct commands)
1767  * @buf_size: size of buffer for indirect commands (0 for direct commands)
1768  * @cd: pointer to command details structure
1769  *
1770  * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1771  */
1772 enum ice_status
1773 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1774 		u16 buf_size, struct ice_sq_cd *cd)
1775 {
1776 	return ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
1777 }
1778 
1779 /**
1780  * ice_aq_get_fw_ver
1781  * @hw: pointer to the HW struct
1782  * @cd: pointer to command details structure or NULL
1783  *
1784  * Get the firmware version (0x0001) from the admin queue commands
1785  */
1786 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1787 {
1788 	struct ice_aqc_get_ver *resp;
1789 	struct ice_aq_desc desc;
1790 	enum ice_status status;
1791 
1792 	resp = &desc.params.get_ver;
1793 
1794 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1795 
1796 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1797 
1798 	if (!status) {
1799 		hw->fw_branch = resp->fw_branch;
1800 		hw->fw_maj_ver = resp->fw_major;
1801 		hw->fw_min_ver = resp->fw_minor;
1802 		hw->fw_patch = resp->fw_patch;
1803 		hw->fw_build = LE32_TO_CPU(resp->fw_build);
1804 		hw->api_branch = resp->api_branch;
1805 		hw->api_maj_ver = resp->api_major;
1806 		hw->api_min_ver = resp->api_minor;
1807 		hw->api_patch = resp->api_patch;
1808 	}
1809 
1810 	return status;
1811 }
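
/*
 * Illustrative sketch (compiled out): querying the firmware version and
 * reporting the values that ice_aq_get_fw_ver() caches in the HW struct.
 */
#if 0
static void ice_example_report_fw_ver(struct ice_hw *hw)
{
	if (ice_aq_get_fw_ver(hw, NULL))
		return;

	ice_info(hw, "fw %u.%u.%u api %u.%u\n",
		 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch,
		 hw->api_maj_ver, hw->api_min_ver);
}
#endif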
1812 
1813 /**
1814  * ice_aq_send_driver_ver
1815  * @hw: pointer to the HW struct
1816  * @dv: driver's major, minor version
1817  * @cd: pointer to command details structure or NULL
1818  *
1819  * Send the driver version (0x0002) to the firmware
1820  */
1821 enum ice_status
1822 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1823 		       struct ice_sq_cd *cd)
1824 {
1825 	struct ice_aqc_driver_ver *cmd;
1826 	struct ice_aq_desc desc;
1827 	u16 len;
1828 
1829 	cmd = &desc.params.driver_ver;
1830 
1831 	if (!dv)
1832 		return ICE_ERR_PARAM;
1833 
1834 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1835 
1836 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1837 	cmd->major_ver = dv->major_ver;
1838 	cmd->minor_ver = dv->minor_ver;
1839 	cmd->build_ver = dv->build_ver;
1840 	cmd->subbuild_ver = dv->subbuild_ver;
1841 
1842 	len = 0;
1843 	while (len < sizeof(dv->driver_string) &&
1844 	       IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
1845 		len++;
1846 
1847 	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1848 }
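
/*
 * Illustrative sketch (compiled out): sending a driver version of 1.2.3.4
 * with a short description string. The version numbers and string are
 * placeholders; only the ASCII prefix of driver_string is transmitted, as
 * enforced by the IS_ASCII() scan above.
 */
#if 0
static enum ice_status ice_example_send_driver_ver(struct ice_hw *hw)
{
	struct ice_driver_ver dv = { 0 };

	dv.major_ver = 1;
	dv.minor_ver = 2;
	dv.build_ver = 3;
	dv.subbuild_ver = 4;
	strncpy((char *)dv.driver_string, "example ice driver",
		sizeof(dv.driver_string));

	return ice_aq_send_driver_ver(hw, &dv, NULL);
}
#endif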
1849 
1850 /**
1851  * ice_aq_q_shutdown
1852  * @hw: pointer to the HW struct
1853  * @unloading: is the driver unloading itself
1854  *
1855  * Tell the Firmware that we're shutting down the AdminQ and whether
1856  * or not the driver is unloading as well (0x0003).
1857  */
1858 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1859 {
1860 	struct ice_aqc_q_shutdown *cmd;
1861 	struct ice_aq_desc desc;
1862 
1863 	cmd = &desc.params.q_shutdown;
1864 
1865 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1866 
1867 	if (unloading)
1868 		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1869 
1870 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1871 }
1872 
1873 /**
1874  * ice_aq_req_res
1875  * @hw: pointer to the HW struct
1876  * @res: resource ID
1877  * @access: access type
1878  * @sdp_number: resource number
1879  * @timeout: the maximum time in ms that the driver may hold the resource
1880  * @cd: pointer to command details structure or NULL
1881  *
1882  * Requests common resource using the admin queue commands (0x0008).
1883  * When attempting to acquire the Global Config Lock, the driver can
1884  * learn of three states:
1885  *  1) ICE_SUCCESS -        acquired lock, and can perform download package
1886  *  2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
1887  *  3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1888  *                          successfully downloaded the package; the driver does
1889  *                          not have to download the package and can continue
1890  *                          loading
1891  *
1892  * Note that if the caller is in an acquire-lock, perform-action, release-lock
1893  * phase of operation, it is possible that the FW may detect a timeout and issue
1894  * a CORER. In this case, the driver will receive a CORER interrupt and will
1895  * have to determine its cause. The calling thread that is handling this flow
1896  * will likely get an error propagated back to it indicating that the Download
1897  * Package, Update Package, or Release Resource AQ commands timed out.
1898  */
1899 static enum ice_status
1900 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1901 	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1902 	       struct ice_sq_cd *cd)
1903 {
1904 	struct ice_aqc_req_res *cmd_resp;
1905 	struct ice_aq_desc desc;
1906 	enum ice_status status;
1907 
1908 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1909 
1910 	cmd_resp = &desc.params.res_owner;
1911 
1912 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1913 
1914 	cmd_resp->res_id = CPU_TO_LE16(res);
1915 	cmd_resp->access_type = CPU_TO_LE16(access);
1916 	cmd_resp->res_number = CPU_TO_LE32(sdp_number);
1917 	cmd_resp->timeout = CPU_TO_LE32(*timeout);
1918 	*timeout = 0;
1919 
1920 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1921 
1922 	/* The completion specifies the maximum time in ms that the driver
1923 	 * may hold the resource in the Timeout field.
1924 	 */
1925 
1926 	/* Global config lock response utilizes an additional status field.
1927 	 *
1928 	 * If the Global config lock resource is held by some other driver, the
1929 	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1930 	 * and the timeout field indicates the maximum time the current owner
1931 	 * of the resource has to free it.
1932 	 */
1933 	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1934 		if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1935 			*timeout = LE32_TO_CPU(cmd_resp->timeout);
1936 			return ICE_SUCCESS;
1937 		} else if (LE16_TO_CPU(cmd_resp->status) ==
1938 			   ICE_AQ_RES_GLBL_IN_PROG) {
1939 			*timeout = LE32_TO_CPU(cmd_resp->timeout);
1940 			return ICE_ERR_AQ_ERROR;
1941 		} else if (LE16_TO_CPU(cmd_resp->status) ==
1942 			   ICE_AQ_RES_GLBL_DONE) {
1943 			return ICE_ERR_AQ_NO_WORK;
1944 		}
1945 
1946 		/* invalid FW response, force a timeout immediately */
1947 		*timeout = 0;
1948 		return ICE_ERR_AQ_ERROR;
1949 	}
1950 
1951 	/* If the resource is held by some other driver, the command completes
1952 	 * with a busy return value and the timeout field indicates the maximum
1953 	 * time the current owner of the resource has to free it.
1954 	 */
1955 	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1956 		*timeout = LE32_TO_CPU(cmd_resp->timeout);
1957 
1958 	return status;
1959 }
1960 
1961 /**
1962  * ice_aq_release_res
1963  * @hw: pointer to the HW struct
1964  * @res: resource ID
1965  * @sdp_number: resource number
1966  * @cd: pointer to command details structure or NULL
1967  *
1968  * Release a common resource using the admin queue commands (0x0009).
1969  */
1970 static enum ice_status
1971 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1972 		   struct ice_sq_cd *cd)
1973 {
1974 	struct ice_aqc_req_res *cmd;
1975 	struct ice_aq_desc desc;
1976 
1977 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1978 
1979 	cmd = &desc.params.res_owner;
1980 
1981 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1982 
1983 	cmd->res_id = CPU_TO_LE16(res);
1984 	cmd->res_number = CPU_TO_LE32(sdp_number);
1985 
1986 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1987 }
1988 
1989 /**
1990  * ice_acquire_res
1991  * @hw: pointer to the HW structure
1992  * @res: resource ID
1993  * @access: access type (read or write)
1994  * @timeout: timeout in milliseconds
1995  *
1996  * This function will attempt to acquire the ownership of a resource.
1997  */
1998 enum ice_status
1999 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
2000 		enum ice_aq_res_access_type access, u32 timeout)
2001 {
2002 #define ICE_RES_POLLING_DELAY_MS	10
2003 	u32 delay = ICE_RES_POLLING_DELAY_MS;
2004 	u32 time_left = timeout;
2005 	enum ice_status status;
2006 
2007 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2008 
2009 	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
2010 
2011 	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
2012 	 * previously acquired the resource and performed any necessary updates;
2013 	 * in this case the caller does not obtain the resource and has no
2014 	 * further work to do.
2015 	 */
2016 	if (status == ICE_ERR_AQ_NO_WORK)
2017 		goto ice_acquire_res_exit;
2018 
2019 	if (status)
2020 		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
2021 
2022 	/* If necessary, poll until the current lock owner times out */
2023 	timeout = time_left;
2024 	while (status && timeout && time_left) {
2025 		ice_msec_delay(delay, true);
2026 		timeout = (timeout > delay) ? timeout - delay : 0;
2027 		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
2028 
2029 		if (status == ICE_ERR_AQ_NO_WORK)
2030 			/* lock free, but no work to do */
2031 			break;
2032 
2033 		if (!status)
2034 			/* lock acquired */
2035 			break;
2036 	}
2037 	if (status && status != ICE_ERR_AQ_NO_WORK)
2038 		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
2039 
2040 ice_acquire_res_exit:
2041 	if (status == ICE_ERR_AQ_NO_WORK) {
2042 		if (access == ICE_RES_WRITE)
2043 			ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
2044 		else
2045 			ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
2046 	}
2047 	return status;
2048 }
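
/*
 * Illustrative sketch (compiled out): the acquire/use/release pattern for a
 * shared resource, including the ICE_ERR_AQ_NO_WORK case described above.
 * ICE_NVM_RES_ID and the 3000 ms timeout are example choices only.
 */
#if 0
static enum ice_status ice_example_with_nvm_lock(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ, 3000);
	if (status == ICE_ERR_AQ_NO_WORK)
		return ICE_SUCCESS;	/* another driver did the work */
	if (status)
		return status;		/* could not get the lock in time */

	/* ... access the resource while holding the lock ... */

	ice_release_res(hw, ICE_NVM_RES_ID);
	return ICE_SUCCESS;
}
#endif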
2049 
2050 /**
2051  * ice_release_res
2052  * @hw: pointer to the HW structure
2053  * @res: resource ID
2054  *
2055  * This function will release a resource using the proper Admin Command.
2056  */
2057 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
2058 {
2059 	enum ice_status status;
2060 	u32 total_delay = 0;
2061 
2062 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2063 
2064 	status = ice_aq_release_res(hw, res, 0, NULL);
2065 
2066 	/* there are some rare cases when trying to release the resource
2067 	 * results in an admin queue timeout, so handle them correctly
2068 	 */
2069 	while ((status == ICE_ERR_AQ_TIMEOUT) &&
2070 	       (total_delay < hw->adminq.sq_cmd_timeout)) {
2071 		ice_msec_delay(1, true);
2072 		status = ice_aq_release_res(hw, res, 0, NULL);
2073 		total_delay++;
2074 	}
2075 }
2076 
2077 /**
2078  * ice_aq_alloc_free_res - command to allocate/free resources
2079  * @hw: pointer to the HW struct
2080  * @num_entries: number of resource entries in buffer
2081  * @buf: Indirect buffer to hold data parameters and response
2082  * @buf_size: size of buffer for indirect commands
2083  * @opc: pass in the command opcode
2084  * @cd: pointer to command details structure or NULL
2085  *
2086  * Helper function to allocate/free resources using the admin queue commands
2087  */
2088 enum ice_status
2089 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
2090 		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
2091 		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2092 {
2093 	struct ice_aqc_alloc_free_res_cmd *cmd;
2094 	struct ice_aq_desc desc;
2095 
2096 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2097 
2098 	cmd = &desc.params.sw_res_ctrl;
2099 
2100 	if (!buf)
2101 		return ICE_ERR_PARAM;
2102 
2103 	if (buf_size < FLEX_ARRAY_SIZE(buf, elem, num_entries))
2104 		return ICE_ERR_PARAM;
2105 
2106 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
2107 
2108 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2109 
2110 	cmd->num_entries = CPU_TO_LE16(num_entries);
2111 
2112 	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2113 }
2114 
2115 /**
2116  * ice_alloc_hw_res - allocate resource
2117  * @hw: pointer to the HW struct
2118  * @type: type of resource
2119  * @num: number of resources to allocate
2120  * @btm: allocate from bottom
2121  * @res: pointer to array that will receive the resources
2122  */
2123 enum ice_status
2124 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
2125 {
2126 	struct ice_aqc_alloc_free_res_elem *buf;
2127 	enum ice_status status;
2128 	u16 buf_len;
2129 
2130 	buf_len = ice_struct_size(buf, elem, num);
2131 	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2132 	if (!buf)
2133 		return ICE_ERR_NO_MEMORY;
2134 
2135 	/* Prepare buffer to allocate resource. */
2136 	buf->num_elems = CPU_TO_LE16(num);
2137 	buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
2138 				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
2139 	if (btm)
2140 		buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
2141 
2142 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
2143 				       ice_aqc_opc_alloc_res, NULL);
2144 	if (status)
2145 		goto ice_alloc_res_exit;
2146 
2147 	ice_memcpy(res, buf->elem, sizeof(*buf->elem) * num,
2148 		   ICE_NONDMA_TO_NONDMA);
2149 
2150 ice_alloc_res_exit:
2151 	ice_free(hw, buf);
2152 	return status;
2153 }
2154 
2155 /**
2156  * ice_free_hw_res - free allocated HW resource
2157  * @hw: pointer to the HW struct
2158  * @type: type of resource to free
2159  * @num: number of resources
2160  * @res: pointer to array that contains the resources to free
2161  */
2162 enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
2163 {
2164 	struct ice_aqc_alloc_free_res_elem *buf;
2165 	enum ice_status status;
2166 	u16 buf_len;
2167 
2168 	buf_len = ice_struct_size(buf, elem, num);
2169 	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2170 	if (!buf)
2171 		return ICE_ERR_NO_MEMORY;
2172 
2173 	/* Prepare buffer to free resource. */
2174 	buf->num_elems = CPU_TO_LE16(num);
2175 	buf->res_type = CPU_TO_LE16(type);
2176 	ice_memcpy(buf->elem, res, sizeof(*buf->elem) * num,
2177 		   ICE_NONDMA_TO_NONDMA);
2178 
2179 	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
2180 				       ice_aqc_opc_free_res, NULL);
2181 	if (status)
2182 		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2183 
2184 	ice_free(hw, buf);
2185 	return status;
2186 }
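
/*
 * Illustrative sketch (compiled out): an allocate/free round trip through
 * the two helpers above. ICE_AQC_RES_TYPE_VSI_LIST_REP is used purely as an
 * example resource type; any valid type follows the same pattern.
 */
#if 0
static enum ice_status ice_example_res_round_trip(struct ice_hw *hw)
{
	enum ice_status status;
	u16 res_ids[2];

	status = ice_alloc_hw_res(hw, ICE_AQC_RES_TYPE_VSI_LIST_REP, 2,
				  false, res_ids);
	if (status)
		return status;

	/* ... use res_ids[0] and res_ids[1] ... */

	return ice_free_hw_res(hw, ICE_AQC_RES_TYPE_VSI_LIST_REP, 2, res_ids);
}
#endif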
2187 
2188 /**
2189  * ice_get_num_per_func - determine number of resources per PF
2190  * @hw: pointer to the HW structure
2191  * @max: value to be evenly split between each PF
2192  *
2193  * Determine the number of valid functions by going through the bitmap returned
2194  * from parsing capabilities and use this to calculate the number of resources
2195  * per PF based on the max value passed in.
2196  */
2197 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
2198 {
2199 	u8 funcs;
2200 
2201 #define ICE_CAPS_VALID_FUNCS_M	0xFF
2202 	funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
2203 			     ICE_CAPS_VALID_FUNCS_M);
2204 
2205 	if (!funcs)
2206 		return 0;
2207 
2208 	return max / funcs;
2209 }
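
/*
 * Worked example: assuming ICE_MAX_VSI is 768 and the valid_functions
 * bitmap is 0xFF (8 PFs), ice_get_num_per_func(hw, ICE_MAX_VSI) yields
 * 768 / 8 = 96 guaranteed VSIs per PF.
 */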
2210 
2211 /**
2212  * ice_print_led_caps - print LED capabilities
2213  * @hw: pointer to the ice_hw instance
2214  * @caps: pointer to common caps instance
2215  * @prefix: string to prefix when printing
2216  * @dbg: set to indicate debug print
2217  */
2218 static void
2219 ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2220 		   char const *prefix, bool dbg)
2221 {
2222 	u8 i;
2223 
2224 	if (dbg)
2225 		ice_debug(hw, ICE_DBG_INIT, "%s: led_pin_num = %d\n", prefix,
2226 			  caps->led_pin_num);
2227 	else
2228 		ice_info(hw, "%s: led_pin_num = %d\n", prefix,
2229 			 caps->led_pin_num);
2230 
2231 	for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_LED; i++) {
2232 		if (!caps->led[i])
2233 			continue;
2234 
2235 		if (dbg)
2236 			ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = %d\n",
2237 				  prefix, i, caps->led[i]);
2238 		else
2239 			ice_info(hw, "%s: led[%d] = %d\n", prefix, i,
2240 				 caps->led[i]);
2241 	}
2242 }
2243 
2244 /**
2245  * ice_print_sdp_caps - print SDP capabilities
2246  * @hw: pointer to the ice_hw instance
2247  * @caps: pointer to common caps instance
2248  * @prefix: string to prefix when printing
2249  * @dbg: set to indicate debug print
2250  */
2251 static void
2252 ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2253 		   char const *prefix, bool dbg)
2254 {
2255 	u8 i;
2256 
2257 	if (dbg)
2258 		ice_debug(hw, ICE_DBG_INIT, "%s: sdp_pin_num = %d\n", prefix,
2259 			  caps->sdp_pin_num);
2260 	else
2261 		ice_info(hw, "%s: sdp_pin_num = %d\n", prefix,
2262 			 caps->sdp_pin_num);
2263 
2264 	for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_SDP; i++) {
2265 		if (!caps->sdp[i])
2266 			continue;
2267 
2268 		if (dbg)
2269 			ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = %d\n",
2270 				  prefix, i, caps->sdp[i]);
2271 		else
2272 			ice_info(hw, "%s: sdp[%d] = %d\n", prefix,
2273 				 i, caps->sdp[i]);
2274 	}
2275 }
2276 
2277 /**
2278  * ice_parse_common_caps - parse common device/function capabilities
2279  * @hw: pointer to the HW struct
2280  * @caps: pointer to common capabilities structure
2281  * @elem: the capability element to parse
2282  * @prefix: message prefix for tracing capabilities
2283  *
2284  * Given a capability element, extract relevant details into the common
2285  * capability structure.
2286  *
2287  * Returns: true if the capability matches one of the common capability ids,
2288  * false otherwise.
2289  */
2290 static bool
2291 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2292 		      struct ice_aqc_list_caps_elem *elem, const char *prefix)
2293 {
2294 	u32 logical_id = LE32_TO_CPU(elem->logical_id);
2295 	u32 phys_id = LE32_TO_CPU(elem->phys_id);
2296 	u32 number = LE32_TO_CPU(elem->number);
2297 	u16 cap = LE16_TO_CPU(elem->cap);
2298 	bool found = true;
2299 
2300 	switch (cap) {
2301 	case ICE_AQC_CAPS_SWITCHING_MODE:
2302 		caps->switching_mode = number;
2303 		ice_debug(hw, ICE_DBG_INIT, "%s: switching_mode = %d\n", prefix,
2304 			  caps->switching_mode);
2305 		break;
2306 	case ICE_AQC_CAPS_MANAGEABILITY_MODE:
2307 		caps->mgmt_mode = number;
2308 		caps->mgmt_protocols_mctp = logical_id;
2309 		ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_mode = %d\n", prefix,
2310 			  caps->mgmt_mode);
2311 		ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_protocols_mctp = %d\n", prefix,
2312 			  caps->mgmt_protocols_mctp);
2313 		break;
2314 	case ICE_AQC_CAPS_OS2BMC:
2315 		caps->os2bmc = number;
2316 		ice_debug(hw, ICE_DBG_INIT, "%s: os2bmc = %d\n", prefix, caps->os2bmc);
2317 		break;
2318 	case ICE_AQC_CAPS_VALID_FUNCTIONS:
2319 		caps->valid_functions = number;
2320 		ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
2321 			  caps->valid_functions);
2322 		break;
2323 	case ICE_AQC_CAPS_SRIOV:
2324 		caps->sr_iov_1_1 = (number == 1);
2325 		ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
2326 			  caps->sr_iov_1_1);
2327 		break;
2328 	case ICE_AQC_CAPS_802_1QBG:
2329 		caps->evb_802_1_qbg = (number == 1);
2330 		ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbg = %d\n", prefix, number);
2331 		break;
2332 	case ICE_AQC_CAPS_802_1BR:
2333 		caps->evb_802_1_qbh = (number == 1);
2334 		ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbh = %d\n", prefix, number);
2335 		break;
2336 	case ICE_AQC_CAPS_DCB:
2337 		caps->dcb = (number == 1);
2338 		caps->active_tc_bitmap = logical_id;
2339 		caps->maxtc = phys_id;
2340 		ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
2341 		ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
2342 			  caps->active_tc_bitmap);
2343 		ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
2344 		break;
2345 	case ICE_AQC_CAPS_ISCSI:
2346 		caps->iscsi = (number == 1);
2347 		ice_debug(hw, ICE_DBG_INIT, "%s: iscsi = %d\n", prefix, caps->iscsi);
2348 		break;
2349 	case ICE_AQC_CAPS_RSS:
2350 		caps->rss_table_size = number;
2351 		caps->rss_table_entry_width = logical_id;
2352 		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
2353 			  caps->rss_table_size);
2354 		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
2355 			  caps->rss_table_entry_width);
2356 		break;
2357 	case ICE_AQC_CAPS_RXQS:
2358 		caps->num_rxq = number;
2359 		caps->rxq_first_id = phys_id;
2360 		ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
2361 			  caps->num_rxq);
2362 		ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
2363 			  caps->rxq_first_id);
2364 		break;
2365 	case ICE_AQC_CAPS_TXQS:
2366 		caps->num_txq = number;
2367 		caps->txq_first_id = phys_id;
2368 		ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
2369 			  caps->num_txq);
2370 		ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
2371 			  caps->txq_first_id);
2372 		break;
2373 	case ICE_AQC_CAPS_MSIX:
2374 		caps->num_msix_vectors = number;
2375 		caps->msix_vector_first_id = phys_id;
2376 		ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
2377 			  caps->num_msix_vectors);
2378 		ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
2379 			  caps->msix_vector_first_id);
2380 		break;
2381 	case ICE_AQC_CAPS_NVM_MGMT:
2382 		caps->sec_rev_disabled =
2383 			(number & ICE_NVM_MGMT_SEC_REV_DISABLED) ?
2384 			true : false;
2385 		ice_debug(hw, ICE_DBG_INIT, "%s: sec_rev_disabled = %d\n", prefix,
2386 			  caps->sec_rev_disabled);
2387 		caps->update_disabled =
2388 			(number & ICE_NVM_MGMT_UPDATE_DISABLED) ?
2389 			true : false;
2390 		ice_debug(hw, ICE_DBG_INIT, "%s: update_disabled = %d\n", prefix,
2391 			  caps->update_disabled);
2392 		caps->nvm_unified_update =
2393 			(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
2394 			true : false;
2395 		ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
2396 			  caps->nvm_unified_update);
2397 		break;
2398 	case ICE_AQC_CAPS_CEM:
2399 		caps->mgmt_cem = (number == 1);
2400 		ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_cem = %d\n", prefix,
2401 			  caps->mgmt_cem);
2402 		break;
2403 	case ICE_AQC_CAPS_IWARP:
2404 		caps->iwarp = (number == 1);
2405 		ice_debug(hw, ICE_DBG_INIT, "%s: iwarp = %d\n", prefix, caps->iwarp);
2406 		break;
2407 	case ICE_AQC_CAPS_ROCEV2_LAG:
2408 		caps->roce_lag = (number == 1);
2409 		ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %d\n",
2410 			  prefix, caps->roce_lag);
2411 		break;
2412 	case ICE_AQC_CAPS_LED:
2413 		if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) {
2414 			caps->led[phys_id] = true;
2415 			caps->led_pin_num++;
2416 			ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = 1\n", prefix, phys_id);
2417 		}
2418 		break;
2419 	case ICE_AQC_CAPS_SDP:
2420 		if (phys_id < ICE_MAX_SUPPORTED_GPIO_SDP) {
2421 			caps->sdp[phys_id] = true;
2422 			caps->sdp_pin_num++;
2423 			ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = 1\n", prefix, phys_id);
2424 		}
2425 		break;
2426 	case ICE_AQC_CAPS_WR_CSR_PROT:
2427 		caps->wr_csr_prot = number;
2428 		caps->wr_csr_prot |= (u64)logical_id << 32;
2429 		ice_debug(hw, ICE_DBG_INIT, "%s: wr_csr_prot = 0x%llX\n", prefix,
2430 			  (unsigned long long)caps->wr_csr_prot);
2431 		break;
2432 	case ICE_AQC_CAPS_WOL_PROXY:
2433 		caps->num_wol_proxy_fltr = number;
2434 		caps->wol_proxy_vsi_seid = logical_id;
2435 		caps->apm_wol_support = !!(phys_id & ICE_WOL_SUPPORT_M);
2436 		caps->acpi_prog_mthd = !!(phys_id &
2437 					  ICE_ACPI_PROG_MTHD_M);
2438 		caps->proxy_support = !!(phys_id & ICE_PROXY_SUPPORT_M);
2439 		ice_debug(hw, ICE_DBG_INIT, "%s: num_wol_proxy_fltr = %d\n", prefix,
2440 			  caps->num_wol_proxy_fltr);
2441 		ice_debug(hw, ICE_DBG_INIT, "%s: wol_proxy_vsi_seid = %d\n", prefix,
2442 			  caps->wol_proxy_vsi_seid);
2443 		ice_debug(hw, ICE_DBG_INIT, "%s: apm_wol_support = %d\n",
2444 			  prefix, caps->apm_wol_support);
2445 		break;
2446 	case ICE_AQC_CAPS_MAX_MTU:
2447 		caps->max_mtu = number;
2448 		ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
2449 			  prefix, caps->max_mtu);
2450 		break;
2451 	case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
2452 		caps->pcie_reset_avoidance = (number > 0);
2453 		ice_debug(hw, ICE_DBG_INIT,
2454 			  "%s: pcie_reset_avoidance = %d\n", prefix,
2455 			  caps->pcie_reset_avoidance);
2456 		break;
2457 	case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
2458 		caps->reset_restrict_support = (number == 1);
2459 		ice_debug(hw, ICE_DBG_INIT,
2460 			  "%s: reset_restrict_support = %d\n", prefix,
2461 			  caps->reset_restrict_support);
2462 		break;
2463 	case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0:
2464 	case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG1:
2465 	case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2:
2466 	case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3:
2467 	{
2468 		u8 index = (u8)(cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0);
2469 
2470 		caps->ext_topo_dev_img_ver_high[index] = number;
2471 		caps->ext_topo_dev_img_ver_low[index] = logical_id;
2472 		caps->ext_topo_dev_img_part_num[index] =
2473 			(phys_id & ICE_EXT_TOPO_DEV_IMG_PART_NUM_M) >>
2474 			ICE_EXT_TOPO_DEV_IMG_PART_NUM_S;
2475 		caps->ext_topo_dev_img_load_en[index] =
2476 			(phys_id & ICE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
2477 		caps->ext_topo_dev_img_prog_en[index] =
2478 			(phys_id & ICE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
2479 		ice_debug(hw, ICE_DBG_INIT,
2480 			  "%s: ext_topo_dev_img_ver_high[%d] = %d\n",
2481 			  prefix, index,
2482 			  caps->ext_topo_dev_img_ver_high[index]);
2483 		ice_debug(hw, ICE_DBG_INIT,
2484 			  "%s: ext_topo_dev_img_ver_low[%d] = %d\n",
2485 			  prefix, index,
2486 			  caps->ext_topo_dev_img_ver_low[index]);
2487 		ice_debug(hw, ICE_DBG_INIT,
2488 			  "%s: ext_topo_dev_img_part_num[%d] = %d\n",
2489 			  prefix, index,
2490 			  caps->ext_topo_dev_img_part_num[index]);
2491 		ice_debug(hw, ICE_DBG_INIT,
2492 			  "%s: ext_topo_dev_img_load_en[%d] = %d\n",
2493 			  prefix, index,
2494 			  caps->ext_topo_dev_img_load_en[index]);
2495 		ice_debug(hw, ICE_DBG_INIT,
2496 			  "%s: ext_topo_dev_img_prog_en[%d] = %d\n",
2497 			  prefix, index,
2498 			  caps->ext_topo_dev_img_prog_en[index]);
2499 		break;
2500 	}
2501 	case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE:
2502 		caps->tx_sched_topo_comp_mode_en = (number == 1);
2503 		break;
2504 	case ICE_AQC_CAPS_DYN_FLATTENING:
2505 		caps->dyn_flattening_en = (number == 1);
2506 		ice_debug(hw, ICE_DBG_INIT, "%s: dyn_flattening_en = %d\n",
2507 			  prefix, caps->dyn_flattening_en);
2508 		break;
2509 	default:
2510 		/* Not one of the recognized common capabilities */
2511 		found = false;
2512 	}
2513 
2514 	return found;
2515 }
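
/*
 * Worked example: a capability element with cap = ICE_AQC_CAPS_RSS,
 * number = 512 and logical_id = 11 takes the ICE_AQC_CAPS_RSS case above,
 * setting caps->rss_table_size = 512 and caps->rss_table_entry_width = 11,
 * and ice_parse_common_caps() returns true.
 */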
2516 
2517 /**
2518  * ice_recalc_port_limited_caps - Recalculate port limited capabilities
2519  * @hw: pointer to the HW structure
2520  * @caps: pointer to capabilities structure to fix
2521  *
2522  * Re-calculate the capabilities that are dependent on the number of physical
2523  * ports; i.e. some features are not supported or function differently on
2524  * devices with more than 4 ports.
2525  */
2526 static void
2527 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2528 {
2529 	/* This assumes device capabilities are always scanned before function
2530 	 * capabilities during the initialization flow.
2531 	 */
2532 	if (hw->dev_caps.num_funcs > 4) {
2533 		/* Max 4 TCs per port */
2534 		caps->maxtc = 4;
2535 		ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
2536 			  caps->maxtc);
2537 		if (caps->iwarp) {
2538 			ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
2539 			caps->iwarp = 0;
2540 		}
2541 
2542 		/* print message only when processing device capabilities
2543 		 * during initialization.
2544 		 */
2545 		if (caps == &hw->dev_caps.common_cap)
2546 			ice_info(hw, "RDMA functionality is not available with the current device configuration.\n");
2547 	}
2548 }
2549 
2550 /**
2551  * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
2552  * @hw: pointer to the HW struct
2553  * @func_p: pointer to function capabilities structure
2554  * @cap: pointer to the capability element to parse
2555  *
2556  * Extract function capabilities for ICE_AQC_CAPS_VF.
2557  */
2558 static void
2559 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2560 		       struct ice_aqc_list_caps_elem *cap)
2561 {
2562 	u32 number = LE32_TO_CPU(cap->number);
2563 	u32 logical_id = LE32_TO_CPU(cap->logical_id);
2564 
2565 	func_p->num_allocd_vfs = number;
2566 	func_p->vf_base_id = logical_id;
2567 	ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
2568 		  func_p->num_allocd_vfs);
2569 	ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
2570 		  func_p->vf_base_id);
2571 }
2572 
2573 /**
2574  * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2575  * @hw: pointer to the HW struct
2576  * @func_p: pointer to function capabilities structure
2577  * @cap: pointer to the capability element to parse
2578  *
2579  * Extract function capabilities for ICE_AQC_CAPS_VSI.
2580  */
2581 static void
2582 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2583 			struct ice_aqc_list_caps_elem *cap)
2584 {
2585 	func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2586 	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2587 		  LE32_TO_CPU(cap->number));
2588 	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2589 		  func_p->guar_num_vsi);
2590 }
2591 
2592 /**
2593  * ice_parse_func_caps - Parse function capabilities
2594  * @hw: pointer to the HW struct
2595  * @func_p: pointer to function capabilities structure
2596  * @buf: buffer containing the function capability records
2597  * @cap_count: the number of capabilities
2598  *
2599  * Helper function to parse function (0x000A) capabilities list. For
2600  * capabilities shared between device and function, this relies on
2601  * ice_parse_common_caps.
2602  *
2603  * Loop through the list of provided capabilities and extract the relevant
2604  * data into the function capabilities structure.
2605  */
2606 static void
2607 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2608 		    void *buf, u32 cap_count)
2609 {
2610 	struct ice_aqc_list_caps_elem *cap_resp;
2611 	u32 i;
2612 
2613 	cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2614 
2615 	ice_memset(func_p, 0, sizeof(*func_p), ICE_NONDMA_MEM);
2616 
2617 	for (i = 0; i < cap_count; i++) {
2618 		u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2619 		bool found;
2620 
2621 		found = ice_parse_common_caps(hw, &func_p->common_cap,
2622 					      &cap_resp[i], "func caps");
2623 
2624 		switch (cap) {
2625 		case ICE_AQC_CAPS_VF:
2626 			ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2627 			break;
2628 		case ICE_AQC_CAPS_VSI:
2629 			ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2630 			break;
2631 		default:
2632 			/* Don't list common capabilities as unknown */
2633 			if (!found)
2634 				ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2635 					  i, cap);
2636 			break;
2637 		}
2638 	}
2639 
2640 	ice_print_led_caps(hw, &func_p->common_cap, "func caps", true);
2641 	ice_print_sdp_caps(hw, &func_p->common_cap, "func caps", true);
2642 
2643 	ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2644 }
2645 
2646 /**
2647  * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2648  * @hw: pointer to the HW struct
2649  * @dev_p: pointer to device capabilities structure
2650  * @cap: capability element to parse
2651  *
2652  * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2653  */
2654 static void
2655 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2656 			      struct ice_aqc_list_caps_elem *cap)
2657 {
2658 	u32 number = LE32_TO_CPU(cap->number);
2659 
2660 	dev_p->num_funcs = ice_hweight32(number);
2661 	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2662 		  dev_p->num_funcs);
2664 }
2665 
2666 /**
2667  * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2668  * @hw: pointer to the HW struct
2669  * @dev_p: pointer to device capabilities structure
2670  * @cap: capability element to parse
2671  *
2672  * Parse ICE_AQC_CAPS_VF for device capabilities.
2673  */
2674 static void
2675 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2676 		      struct ice_aqc_list_caps_elem *cap)
2677 {
2678 	u32 number = LE32_TO_CPU(cap->number);
2679 
2680 	dev_p->num_vfs_exposed = number;
2681 	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vfs_exposed = %d\n",
2682 		  dev_p->num_vfs_exposed);
2683 }
2684 
2685 /**
2686  * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2687  * @hw: pointer to the HW struct
2688  * @dev_p: pointer to device capabilities structure
2689  * @cap: capability element to parse
2690  *
2691  * Parse ICE_AQC_CAPS_VSI for device capabilities.
2692  */
2693 static void
2694 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2695 		       struct ice_aqc_list_caps_elem *cap)
2696 {
2697 	u32 number = LE32_TO_CPU(cap->number);
2698 
2699 	dev_p->num_vsi_allocd_to_host = number;
2700 	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2701 		  dev_p->num_vsi_allocd_to_host);
2702 }
2703 
2704 /**
2705  * ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap
2706  * @hw: pointer to the HW struct
2707  * @dev_p: pointer to device capabilities structure
2708  * @cap: capability element to parse
2709  *
2710  * Parse ICE_AQC_CAPS_NAC_TOPOLOGY for device capabilities.
2711  */
2712 static void
2713 ice_parse_nac_topo_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2714 			    struct ice_aqc_list_caps_elem *cap)
2715 {
2716 	dev_p->nac_topo.mode = LE32_TO_CPU(cap->number);
2717 	dev_p->nac_topo.id = LE32_TO_CPU(cap->phys_id) & ICE_NAC_TOPO_ID_M;
2718 
2719 	ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n",
2720 		  !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M));
2721 	ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n",
2722 		  !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M));
2723 	ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %d\n",
2724 		  dev_p->nac_topo.id);
2725 }
2726 
2727 /**
2728  * ice_parse_dev_caps - Parse device capabilities
2729  * @hw: pointer to the HW struct
2730  * @dev_p: pointer to device capabilities structure
2731  * @buf: buffer containing the device capability records
2732  * @cap_count: the number of capabilities
2733  *
2734  * Helper function to parse the device (0x000B) capabilities list. For
2735  * capabilities shared between device and function, this relies on
2736  * ice_parse_common_caps.
2737  *
2738  * Loop through the list of provided capabilities and extract the relevant
2739  * data into the device capabilities structure.
2740  */
2741 static void
2742 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2743 		   void *buf, u32 cap_count)
2744 {
2745 	struct ice_aqc_list_caps_elem *cap_resp;
2746 	u32 i;
2747 
2748 	cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2749 
2750 	ice_memset(dev_p, 0, sizeof(*dev_p), ICE_NONDMA_MEM);
2751 
2752 	for (i = 0; i < cap_count; i++) {
2753 		u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2754 		bool found;
2755 
2756 		found = ice_parse_common_caps(hw, &dev_p->common_cap,
2757 					      &cap_resp[i], "dev caps");
2758 
2759 		switch (cap) {
2760 		case ICE_AQC_CAPS_VALID_FUNCTIONS:
2761 			ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2762 			break;
2763 		case ICE_AQC_CAPS_VF:
2764 			ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2765 			break;
2766 		case ICE_AQC_CAPS_VSI:
2767 			ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2768 			break;
2769 		case ICE_AQC_CAPS_NAC_TOPOLOGY:
2770 			ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]);
2771 			break;
2772 		default:
2773 			/* Don't list common capabilities as unknown */
2774 			if (!found)
2775 				ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2776 					  i, cap);
2777 			break;
2778 		}
2779 	}
2780 
2781 	ice_print_led_caps(hw, &dev_p->common_cap, "dev caps", true);
2782 	ice_print_sdp_caps(hw, &dev_p->common_cap, "dev caps", true);
2783 
2784 	ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2785 }
2786 
2787 /**
2788  * ice_aq_list_caps - query function/device capabilities
2789  * @hw: pointer to the HW struct
2790  * @buf: a buffer to hold the capabilities
2791  * @buf_size: size of the buffer
2792  * @cap_count: if not NULL, set to the number of capabilities reported
2793  * @opc: capabilities type to discover, device or function
2794  * @cd: pointer to command details structure or NULL
2795  *
2796  * Get the function (0x000A) or device (0x000B) capabilities description from
2797  * firmware and store it in the buffer.
2798  *
2799  * If the cap_count pointer is not NULL, then it is set to the number of
2800  * capabilities firmware will report. Note that if the buffer size is too
2801  * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2802  * cap_count will still be updated in this case. It is recommended that the
2803  * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2804  * firmware could return) to avoid this.
2805  */
2806 static enum ice_status
2807 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2808 		 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2809 {
2810 	struct ice_aqc_list_caps *cmd;
2811 	struct ice_aq_desc desc;
2812 	enum ice_status status;
2813 
2814 	cmd = &desc.params.get_cap;
2815 
2816 	if (opc != ice_aqc_opc_list_func_caps &&
2817 	    opc != ice_aqc_opc_list_dev_caps)
2818 		return ICE_ERR_PARAM;
2819 
2820 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
2821 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2822 
2823 	if (cap_count)
2824 		*cap_count = LE32_TO_CPU(cmd->count);
2825 
2826 	return status;
2827 }
2828 
2829 /**
2830  * ice_discover_dev_caps - Read and extract device capabilities
2831  * @hw: pointer to the hardware structure
2832  * @dev_caps: pointer to device capabilities structure
2833  *
2834  * Read the device capabilities and extract them into the dev_caps structure
2835  * for later use.
2836  */
2837 static enum ice_status
2838 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2839 {
2840 	enum ice_status status;
2841 	u32 cap_count = 0;
2842 	void *cbuf;
2843 
2844 	cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2845 	if (!cbuf)
2846 		return ICE_ERR_NO_MEMORY;
2847 
2848 	/* Although the driver doesn't know the number of capabilities the
2849 	 * device will return, we can simply send a 4KB buffer, the maximum
2850 	 * possible size that firmware can return.
2851 	 */
2852 	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2853 
2854 	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2855 				  ice_aqc_opc_list_dev_caps, NULL);
2856 	if (!status)
2857 		ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2858 	ice_free(hw, cbuf);
2859 
2860 	return status;
2861 }
2862 
2863 /**
2864  * ice_discover_func_caps - Read and extract function capabilities
2865  * @hw: pointer to the hardware structure
2866  * @func_caps: pointer to function capabilities structure
2867  *
2868  * Read the function capabilities and extract them into the func_caps structure
2869  * for later use.
2870  */
2871 static enum ice_status
2872 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2873 {
2874 	enum ice_status status;
2875 	u32 cap_count = 0;
2876 	void *cbuf;
2877 
2878 	cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2879 	if (!cbuf)
2880 		return ICE_ERR_NO_MEMORY;
2881 
2882 	/* Although the driver doesn't know the number of capabilities the
2883 	 * device will return, we can simply send a 4KB buffer, the maximum
2884 	 * possible size that firmware can return.
2885 	 */
2886 	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2887 
2888 	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2889 				  ice_aqc_opc_list_func_caps, NULL);
2890 	if (!status)
2891 		ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2892 	ice_free(hw, cbuf);
2893 
2894 	return status;
2895 }
2896 
2897 /**
2898  * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2899  * @hw: pointer to the hardware structure
2900  */
2901 void ice_set_safe_mode_caps(struct ice_hw *hw)
2902 {
2903 	struct ice_hw_func_caps *func_caps = &hw->func_caps;
2904 	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2905 	struct ice_hw_common_caps cached_caps;
2906 	u32 num_funcs;
2907 
2908 	/* cache some func_caps values that should be restored after memset */
2909 	cached_caps = func_caps->common_cap;
2910 
2911 	/* unset func capabilities */
2912 	memset(func_caps, 0, sizeof(*func_caps));
2913 
2914 #define ICE_RESTORE_FUNC_CAP(name) \
2915 	func_caps->common_cap.name = cached_caps.name
2916 
2917 	/* restore cached values */
2918 	ICE_RESTORE_FUNC_CAP(valid_functions);
2919 	ICE_RESTORE_FUNC_CAP(txq_first_id);
2920 	ICE_RESTORE_FUNC_CAP(rxq_first_id);
2921 	ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2922 	ICE_RESTORE_FUNC_CAP(max_mtu);
2923 	ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2924 
2925 	/* one Tx and one Rx queue in safe mode */
2926 	func_caps->common_cap.num_rxq = 1;
2927 	func_caps->common_cap.num_txq = 1;
2928 
2929 	/* two MSIX vectors, one for traffic and one for misc causes */
2930 	func_caps->common_cap.num_msix_vectors = 2;
2931 	func_caps->guar_num_vsi = 1;
2932 
2933 	/* cache some dev_caps values that should be restored after memset */
2934 	cached_caps = dev_caps->common_cap;
2935 	num_funcs = dev_caps->num_funcs;
2936 
2937 	/* unset dev capabilities */
2938 	memset(dev_caps, 0, sizeof(*dev_caps));
2939 
2940 #define ICE_RESTORE_DEV_CAP(name) \
2941 	dev_caps->common_cap.name = cached_caps.name
2942 
2943 	/* restore cached values */
2944 	ICE_RESTORE_DEV_CAP(valid_functions);
2945 	ICE_RESTORE_DEV_CAP(txq_first_id);
2946 	ICE_RESTORE_DEV_CAP(rxq_first_id);
2947 	ICE_RESTORE_DEV_CAP(msix_vector_first_id);
2948 	ICE_RESTORE_DEV_CAP(max_mtu);
2949 	ICE_RESTORE_DEV_CAP(nvm_unified_update);
2950 	dev_caps->num_funcs = num_funcs;
2951 
2952 	/* one Tx and one Rx queue per function in safe mode */
2953 	dev_caps->common_cap.num_rxq = num_funcs;
2954 	dev_caps->common_cap.num_txq = num_funcs;
2955 
2956 	/* two MSIX vectors per function */
2957 	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2958 }
2959 
2960 /**
2961  * ice_get_caps - get info about the HW
2962  * @hw: pointer to the hardware structure
2963  */
2964 enum ice_status ice_get_caps(struct ice_hw *hw)
2965 {
2966 	enum ice_status status;
2967 
2968 	status = ice_discover_dev_caps(hw, &hw->dev_caps);
2969 	if (status)
2970 		return status;
2971 
2972 	return ice_discover_func_caps(hw, &hw->func_caps);
2973 }
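
/*
 * Illustrative sketch (compiled out): typical init-time use of
 * ice_get_caps(), reading back a few of the parsed function capabilities.
 */
#if 0
static enum ice_status ice_example_query_caps(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_get_caps(hw);
	if (status)
		return status;

	ice_info(hw, "txq %u rxq %u max_mtu %u\n",
		 hw->func_caps.common_cap.num_txq,
		 hw->func_caps.common_cap.num_rxq,
		 hw->func_caps.common_cap.max_mtu);
	return ICE_SUCCESS;
}
#endif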
2974 
2975 /**
2976  * ice_aq_manage_mac_write - manage MAC address write command
2977  * @hw: pointer to the HW struct
2978  * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2979  * @flags: flags to control write behavior
2980  * @cd: pointer to command details structure or NULL
2981  *
2982  * This function is used to write MAC address to the NVM (0x0108).
2983  */
2984 enum ice_status
2985 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2986 			struct ice_sq_cd *cd)
2987 {
2988 	struct ice_aqc_manage_mac_write *cmd;
2989 	struct ice_aq_desc desc;
2990 
2991 	cmd = &desc.params.mac_write;
2992 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2993 
2994 	cmd->flags = flags;
2995 	ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
2996 
2997 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2998 }
2999 
3000 /**
3001  * ice_aq_clear_pxe_mode
3002  * @hw: pointer to the HW struct
3003  *
3004  * Tell the firmware that the driver is taking over from PXE (0x0110).
3005  */
3006 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
3007 {
3008 	struct ice_aq_desc desc;
3009 
3010 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
3011 	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
3012 
3013 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3014 }
3015 
3016 /**
3017  * ice_clear_pxe_mode - clear pxe operations mode
3018  * @hw: pointer to the HW struct
3019  *
3020  * Make sure all PXE mode settings are cleared, including things
3021  * like descriptor fetch/write-back mode.
3022  */
3023 void ice_clear_pxe_mode(struct ice_hw *hw)
3024 {
3025 	if (ice_check_sq_alive(hw, &hw->adminq))
3026 		ice_aq_clear_pxe_mode(hw);
3027 }
3028 
3029 /**
3030  * ice_aq_set_port_params - set physical port parameters.
3031  * @pi: pointer to the port info struct
3032  * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
3033  * @save_bad_pac: if set, packets with errors are forwarded to the bad frames VSI
3034  * @pad_short_pac: if set, transmit packets smaller than 60 bytes are padded
3035  * @double_vlan: if set, double VLAN is enabled
3036  * @cd: pointer to command details structure or NULL
3037  *
3038  * Set Physical port parameters (0x0203)
3039  */
3040 enum ice_status
3041 ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
3042 		       bool save_bad_pac, bool pad_short_pac, bool double_vlan,
3043 		       struct ice_sq_cd *cd)
3044 
3045 {
3046 	struct ice_aqc_set_port_params *cmd;
3047 	struct ice_hw *hw = pi->hw;
3048 	struct ice_aq_desc desc;
3049 	u16 cmd_flags = 0;
3050 
3051 	cmd = &desc.params.set_port_params;
3052 
3053 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
3054 	cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
3055 	if (save_bad_pac)
3056 		cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS;
3057 	if (pad_short_pac)
3058 		cmd_flags |= ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS;
3059 	if (double_vlan)
3060 		cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
3061 	cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
3062 
3063 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3064 }
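
/*
 * Illustrative sketch (compiled out): enabling double VLAN mode on a port
 * while leaving bad-frame forwarding and short-packet padding disabled.
 * The bad_frame_vsi argument of 0 is a placeholder.
 */
#if 0
static enum ice_status ice_example_enable_dvm(struct ice_port_info *pi)
{
	return ice_aq_set_port_params(pi, 0, false, false, true, NULL);
}
#endif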
3065 
3066 /**
3067  * ice_is_100m_speed_supported
3068  * @hw: pointer to the HW struct
3069  *
3070  * Returns true if 100M speeds are supported by the device, false otherwise.
3072  */
3073 bool ice_is_100m_speed_supported(struct ice_hw *hw)
3074 {
3075 	switch (hw->device_id) {
3076 	case ICE_DEV_ID_E822C_SGMII:
3077 	case ICE_DEV_ID_E822L_SGMII:
3078 	case ICE_DEV_ID_E823L_1GBE:
3079 	case ICE_DEV_ID_E823C_SGMII:
3080 		return true;
3081 	default:
3082 		return false;
3083 	}
3084 }
3085 
3086 /**
3087  * ice_get_link_speed_based_on_phy_type - returns link speed
3088  * @phy_type_low: lower part of phy_type
3089  * @phy_type_high: higher part of phy_type
3090  *
3091  * This helper function converts an entry in the PHY type structure
3092  * [phy_type_low, phy_type_high] to its corresponding link speed.
3093  * Note: In the structure of [phy_type_low, phy_type_high], exactly one
3094  * bit should be set, as this function converts a single PHY type to its
3095  * speed.
3096  * If no bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
3097  * If more than one bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
3098  */
3099 static u16
3100 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
3101 {
3102 	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3103 	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3104 
3105 	switch (phy_type_low) {
3106 	case ICE_PHY_TYPE_LOW_100BASE_TX:
3107 	case ICE_PHY_TYPE_LOW_100M_SGMII:
3108 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
3109 		break;
3110 	case ICE_PHY_TYPE_LOW_1000BASE_T:
3111 	case ICE_PHY_TYPE_LOW_1000BASE_SX:
3112 	case ICE_PHY_TYPE_LOW_1000BASE_LX:
3113 	case ICE_PHY_TYPE_LOW_1000BASE_KX:
3114 	case ICE_PHY_TYPE_LOW_1G_SGMII:
3115 		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
3116 		break;
3117 	case ICE_PHY_TYPE_LOW_2500BASE_T:
3118 	case ICE_PHY_TYPE_LOW_2500BASE_X:
3119 	case ICE_PHY_TYPE_LOW_2500BASE_KX:
3120 		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
3121 		break;
3122 	case ICE_PHY_TYPE_LOW_5GBASE_T:
3123 	case ICE_PHY_TYPE_LOW_5GBASE_KR:
3124 		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
3125 		break;
3126 	case ICE_PHY_TYPE_LOW_10GBASE_T:
3127 	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
3128 	case ICE_PHY_TYPE_LOW_10GBASE_SR:
3129 	case ICE_PHY_TYPE_LOW_10GBASE_LR:
3130 	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
3131 	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
3132 	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
3133 		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
3134 		break;
3135 	case ICE_PHY_TYPE_LOW_25GBASE_T:
3136 	case ICE_PHY_TYPE_LOW_25GBASE_CR:
3137 	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
3138 	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
3139 	case ICE_PHY_TYPE_LOW_25GBASE_SR:
3140 	case ICE_PHY_TYPE_LOW_25GBASE_LR:
3141 	case ICE_PHY_TYPE_LOW_25GBASE_KR:
3142 	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
3143 	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
3144 	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
3145 	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
3146 		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
3147 		break;
3148 	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
3149 	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
3150 	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
3151 	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
3152 	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
3153 	case ICE_PHY_TYPE_LOW_40G_XLAUI:
3154 		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
3155 		break;
3156 	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
3157 	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
3158 	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
3159 	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
3160 	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
3161 	case ICE_PHY_TYPE_LOW_50G_LAUI2:
3162 	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
3163 	case ICE_PHY_TYPE_LOW_50G_AUI2:
3164 	case ICE_PHY_TYPE_LOW_50GBASE_CP:
3165 	case ICE_PHY_TYPE_LOW_50GBASE_SR:
3166 	case ICE_PHY_TYPE_LOW_50GBASE_FR:
3167 	case ICE_PHY_TYPE_LOW_50GBASE_LR:
3168 	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
3169 	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
3170 	case ICE_PHY_TYPE_LOW_50G_AUI1:
3171 		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
3172 		break;
3173 	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
3174 	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
3175 	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
3176 	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
3177 	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
3178 	case ICE_PHY_TYPE_LOW_100G_CAUI4:
3179 	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
3180 	case ICE_PHY_TYPE_LOW_100G_AUI4:
3181 	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
3182 	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
3183 	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
3184 	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
3185 	case ICE_PHY_TYPE_LOW_100GBASE_DR:
3186 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
3187 		break;
3188 	default:
3189 		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3190 		break;
3191 	}
3192 
3193 	switch (phy_type_high) {
3194 	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
3195 	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
3196 	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
3197 	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
3198 	case ICE_PHY_TYPE_HIGH_100G_AUI2:
3199 		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
3200 		break;
3201 	default:
3202 		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3203 		break;
3204 	}
3205 
3206 	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
3207 	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3208 		return ICE_AQ_LINK_SPEED_UNKNOWN;
3209 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3210 		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
3211 		return ICE_AQ_LINK_SPEED_UNKNOWN;
3212 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3213 		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3214 		return speed_phy_type_low;
3215 	else
3216 		return speed_phy_type_high;
3217 }
3218 
3219 /**
3220  * ice_update_phy_type
3221  * @phy_type_low: pointer to the lower part of phy_type
3222  * @phy_type_high: pointer to the higher part of phy_type
3223  * @link_speeds_bitmap: targeted link speeds bitmap
3224  *
3225  * Note: For the format of link_speeds_bitmap, see
3226  * [ice_aqc_get_link_status->link_speed]. The caller may pass a
3227  * link_speeds_bitmap that includes multiple speeds.
3228  *
3229  * Each entry in the [phy_type_low, phy_type_high] structure represents
3230  * a certain link speed. This helper function turns on the bits in
3231  * [phy_type_low, phy_type_high] that correspond to the speeds set in
3232  * the link_speeds_bitmap input parameter.
3233  */
3234 void
3235 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
3236 		    u16 link_speeds_bitmap)
3237 {
3238 	u64 pt_high;
3239 	u64 pt_low;
3240 	int index;
3241 	u16 speed;
3242 
3243 	/* We first check with low part of phy_type */
3244 	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
3245 		pt_low = BIT_ULL(index);
3246 		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
3247 
3248 		if (link_speeds_bitmap & speed)
3249 			*phy_type_low |= BIT_ULL(index);
3250 	}
3251 
3252 	/* We then check with high part of phy_type */
3253 	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
3254 		pt_high = BIT_ULL(index);
3255 		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
3256 
3257 		if (link_speeds_bitmap & speed)
3258 			*phy_type_high |= BIT_ULL(index);
3259 	}
3260 }
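
/*
 * Illustrative sketch (compiled out): building PHY type masks that cover
 * every PHY type mapping to 10G or 25G, e.g. to restrict advertised speeds.
 */
#if 0
static void ice_example_build_phy_masks(void)
{
	u64 phy_type_low = 0, phy_type_high = 0;

	ice_update_phy_type(&phy_type_low, &phy_type_high,
			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);

	/* phy_type_low now has a bit set for each 10G and 25G PHY type */
}
#endif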
3261 
3262 /**
3263  * ice_aq_set_phy_cfg
3264  * @hw: pointer to the HW struct
3265  * @pi: port info structure of the interested logical port
3266  * @cfg: structure with PHY configuration data to be set
3267  * @cd: pointer to command details structure or NULL
3268  *
3269  * Set the various PHY configuration parameters supported on the Port.
3270  * One or more of the Set PHY config parameters may be ignored in an MFP
3271  * mode as the PF may not have the privilege to set some of the PHY Config
3272  * parameters. This status will be indicated by the command response (0x0601).
3273  */
3274 enum ice_status
3275 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
3276 		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
3277 {
3278 	struct ice_aq_desc desc;
3279 	enum ice_status status;
3280 
3281 	if (!cfg)
3282 		return ICE_ERR_PARAM;
3283 
3284 	/* Ensure that only valid bits of cfg->caps can be turned on. */
3285 	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
3286 		ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
3287 			  cfg->caps);
3288 
3289 		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
3290 	}
3291 
3292 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
3293 	desc.params.set_phy.lport_num = pi->lport;
3294 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3295 
3296 	ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
3297 	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
3298 		  (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
3299 	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
3300 		  (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
3301 	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", cfg->caps);
3302 	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
3303 		  cfg->low_power_ctrl_an);
3304 	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", cfg->eee_cap);
3305 	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n", cfg->eeer_value);
3306 	ice_debug(hw, ICE_DBG_LINK, "	link_fec_opt = 0x%x\n",
3307 		  cfg->link_fec_opt);
3308 
3309 	status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
3310 
3311 	if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
3312 		status = ICE_SUCCESS;
3313 
3314 	if (!status)
3315 		pi->phy.curr_user_phy_cfg = *cfg;
3316 
3317 	return status;
3318 }
3319 
3320 /**
3321  * ice_update_link_info - update status of the HW network link
3322  * @pi: port info structure of the logical port of interest
3323  */
3324 enum ice_status ice_update_link_info(struct ice_port_info *pi)
3325 {
3326 	struct ice_link_status *li;
3327 	enum ice_status status;
3328 
3329 	if (!pi)
3330 		return ICE_ERR_PARAM;
3331 
3332 	li = &pi->phy.link_info;
3333 
3334 	status = ice_aq_get_link_info(pi, true, NULL, NULL);
3335 	if (status)
3336 		return status;
3337 
3338 	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
3339 		struct ice_aqc_get_phy_caps_data *pcaps;
3340 		struct ice_hw *hw;
3341 
3342 		hw = pi->hw;
3343 		pcaps = (struct ice_aqc_get_phy_caps_data *)
3344 			ice_malloc(hw, sizeof(*pcaps));
3345 		if (!pcaps)
3346 			return ICE_ERR_NO_MEMORY;
3347 
3348 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
3349 					     pcaps, NULL);
3350 
3351 		if (status == ICE_SUCCESS)
3352 			ice_memcpy(li->module_type, &pcaps->module_type,
3353 				   sizeof(li->module_type),
3354 				   ICE_NONDMA_TO_NONDMA);
3355 
3356 		ice_free(hw, pcaps);
3357 	}
3358 
3359 	return status;
3360 }
3361 
3362 /**
3363  * ice_cache_phy_user_req
3364  * @pi: port information structure
3365  * @cache_data: PHY logging data
3366  * @cache_mode: PHY logging mode
3367  *
3368  * Cache the user request (FC, FEC, SPEED) for later use.
3369  */
3370 static void
3371 ice_cache_phy_user_req(struct ice_port_info *pi,
3372 		       struct ice_phy_cache_mode_data cache_data,
3373 		       enum ice_phy_cache_mode cache_mode)
3374 {
3375 	if (!pi)
3376 		return;
3377 
3378 	switch (cache_mode) {
3379 	case ICE_FC_MODE:
3380 		pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
3381 		break;
3382 	case ICE_SPEED_MODE:
3383 		pi->phy.curr_user_speed_req =
3384 			cache_data.data.curr_user_speed_req;
3385 		break;
3386 	case ICE_FEC_MODE:
3387 		pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
3388 		break;
3389 	default:
3390 		break;
3391 	}
3392 }
3393 
3394 /**
3395  * ice_caps_to_fc_mode
3396  * @caps: PHY capabilities
3397  *
3398  * Convert PHY FC capabilities to ice FC mode
3399  */
3400 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
3401 {
3402 	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
3403 	    caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3404 		return ICE_FC_FULL;
3405 
3406 	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
3407 		return ICE_FC_TX_PAUSE;
3408 
3409 	if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3410 		return ICE_FC_RX_PAUSE;
3411 
3412 	return ICE_FC_NONE;
3413 }
3414 
3415 /**
3416  * ice_caps_to_fec_mode
3417  * @caps: PHY capabilities
3418  * @fec_options: Link FEC options
3419  *
3420  * Convert PHY FEC capabilities to ice FEC mode
3421  */
3422 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
3423 {
3424 	if (caps & ICE_AQC_PHY_EN_AUTO_FEC) {
3425 		if (fec_options & ICE_AQC_PHY_FEC_DIS)
3426 			return ICE_FEC_DIS_AUTO;
3427 		else
3428 			return ICE_FEC_AUTO;
3429 	}
3430 
3431 	if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3432 			   ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3433 			   ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
3434 			   ICE_AQC_PHY_FEC_25G_KR_REQ))
3435 		return ICE_FEC_BASER;
3436 
3437 	if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3438 			   ICE_AQC_PHY_FEC_25G_RS_544_REQ |
3439 			   ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
3440 		return ICE_FEC_RS;
3441 
3442 	return ICE_FEC_NONE;
3443 }
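
/*
 * Sketch for illustration only (hypothetical helper name): decoding the
 * flow control and FEC state from a get-PHY-caps response with the two
 * converters above.
 */
static void
ice_example_decode_caps(struct ice_aqc_get_phy_caps_data *pcaps,
			enum ice_fc_mode *fc, enum ice_fec_mode *fec)
{
	/* both pause bits -> ICE_FC_FULL, one -> RX/TX pause, none -> ICE_FC_NONE */
	*fc = ice_caps_to_fc_mode(pcaps->caps);
	/* auto FEC wins; otherwise KR options -> BASER, RS options -> RS */
	*fec = ice_caps_to_fec_mode(pcaps->caps, pcaps->link_fec_options);
}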
3444 
3445 /**
3446  * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
3447  * @pi: port information structure
3448  * @cfg: PHY configuration data to set FC mode
3449  * @req_mode: FC mode to configure
3450  */
3451 static enum ice_status
3452 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3453 	       enum ice_fc_mode req_mode)
3454 {
3455 	struct ice_phy_cache_mode_data cache_data;
3456 	u8 pause_mask = 0x0;
3457 
3458 	if (!pi || !cfg)
3459 		return ICE_ERR_BAD_PTR;
3460 	switch (req_mode) {
3461 	case ICE_FC_AUTO:
3462 	{
3463 		struct ice_aqc_get_phy_caps_data *pcaps;
3464 		enum ice_status status;
3465 
3466 		pcaps = (struct ice_aqc_get_phy_caps_data *)
3467 			ice_malloc(pi->hw, sizeof(*pcaps));
3468 		if (!pcaps)
3469 			return ICE_ERR_NO_MEMORY;
3470 		/* Query the value of FC that both the NIC and attached media
3471 		 * can do.
3472 		 */
3473 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
3474 					     pcaps, NULL);
3475 		if (status) {
3476 			ice_free(pi->hw, pcaps);
3477 			return status;
3478 		}
3479 
3480 		pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3481 		pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3482 
3483 		ice_free(pi->hw, pcaps);
3484 		break;
3485 	}
3486 	case ICE_FC_FULL:
3487 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3488 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3489 		break;
3490 	case ICE_FC_RX_PAUSE:
3491 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3492 		break;
3493 	case ICE_FC_TX_PAUSE:
3494 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3495 		break;
3496 	default:
3497 		break;
3498 	}
3499 
3500 	/* clear the old pause settings */
3501 	cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
3502 		ICE_AQC_PHY_EN_RX_LINK_PAUSE);
3503 
3504 	/* set the new capabilities */
3505 	cfg->caps |= pause_mask;
3506 
3507 	/* Cache user FC request */
3508 	cache_data.data.curr_user_fc_req = req_mode;
3509 	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
3510 
3511 	return ICE_SUCCESS;
3512 }
3513 
3514 /**
3515  * ice_set_fc
3516  * @pi: port information structure
3517  * @aq_failures: pointer to status code, specific to ice_set_fc routine
3518  * @ena_auto_link_update: enable automatic link update
3519  *
3520  * Set the requested flow control mode.
3521  */
3522 enum ice_status
3523 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
3524 {
3525 	struct ice_aqc_set_phy_cfg_data  cfg = { 0 };
3526 	struct ice_aqc_get_phy_caps_data *pcaps;
3527 	enum ice_status status;
3528 	struct ice_hw *hw;
3529 
3530 	if (!pi || !aq_failures)
3531 		return ICE_ERR_BAD_PTR;
3532 
3533 	*aq_failures = 0;
3534 	hw = pi->hw;
3535 
3536 	pcaps = (struct ice_aqc_get_phy_caps_data *)
3537 		ice_malloc(hw, sizeof(*pcaps));
3538 	if (!pcaps)
3539 		return ICE_ERR_NO_MEMORY;
3540 
3541 	/* Get the current PHY config */
3542 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3543 				     pcaps, NULL);
3544 
3545 	if (status) {
3546 		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3547 		goto out;
3548 	}
3549 
3550 	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
3551 
3552 	/* Configure the set PHY data */
3553 	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
3554 	if (status) {
3555 		if (status != ICE_ERR_BAD_PTR)
3556 			*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3557 
3558 		goto out;
3559 	}
3560 
3561 	/* If the capabilities have changed, then set the new config */
3562 	if (cfg.caps != pcaps->caps) {
3563 		int retry_count, retry_max = 10;
3564 
3565 		/* Auto restart link so settings take effect */
3566 		if (ena_auto_link_update)
3567 			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3568 
3569 		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3570 		if (status) {
3571 			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3572 			goto out;
3573 		}
3574 
3575 		/* Update the link info
3576 		 * It sometimes takes a really long time for link to
3577 		 * come back from the atomic reset. Thus, we wait a
3578 		 * little bit.
3579 		 */
3580 		for (retry_count = 0; retry_count < retry_max; retry_count++) {
3581 			status = ice_update_link_info(pi);
3582 
3583 			if (status == ICE_SUCCESS)
3584 				break;
3585 
3586 			ice_msec_delay(100, true);
3587 		}
3588 
3589 		if (status)
3590 			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3591 	}
3592 
3593 out:
3594 	ice_free(hw, pcaps);
3595 	return status;
3596 }
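
/*
 * Usage sketch (hypothetical helper, not driver code): requesting full
 * flow control on a port.  ice_set_fc() reads the requested mode from
 * pi->fc.req_mode, so the caller stores it there first; aq_failures
 * reports which AQ step failed, if any.
 */
static enum ice_status ice_example_enable_fc(struct ice_port_info *pi)
{
	u8 aq_failures = 0;

	pi->fc.req_mode = ICE_FC_FULL;
	/* true: auto-restart link so the new pause settings take effect */
	return ice_set_fc(pi, &aq_failures, true);
}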
3597 
3598 /**
3599  * ice_phy_caps_equals_cfg
3600  * @phy_caps: PHY capabilities
3601  * @phy_cfg: PHY configuration
3602  *
3603  * Helper function to determine if the PHY capabilities match the PHY
3604  * configuration
3605  */
3606 bool
3607 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3608 			struct ice_aqc_set_phy_cfg_data *phy_cfg)
3609 {
3610 	u8 caps_mask, cfg_mask;
3611 
3612 	if (!phy_caps || !phy_cfg)
3613 		return false;
3614 
3615 	/* These bits are not common between capabilities and configuration.
3616 	 * Do not use them to determine equality.
3617 	 */
3618 	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3619 					      ICE_AQC_PHY_EN_MOD_QUAL);
3620 	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3621 
3622 	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3623 	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3624 	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3625 	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3626 	    phy_caps->eee_cap != phy_cfg->eee_cap ||
3627 	    phy_caps->eeer_value != phy_cfg->eeer_value ||
3628 	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3629 		return false;
3630 
3631 	return true;
3632 }
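
/*
 * Sketch (illustration only, hypothetical helper): using the comparison
 * helper above to skip a redundant set-PHY-config AQ call when the
 * desired configuration already matches what the firmware reports.
 */
static enum ice_status
ice_example_apply_cfg(struct ice_hw *hw, struct ice_port_info *pi,
		      struct ice_aqc_get_phy_caps_data *pcaps,
		      struct ice_aqc_set_phy_cfg_data *cfg)
{
	if (ice_phy_caps_equals_cfg(pcaps, cfg))
		return ICE_SUCCESS;	/* nothing would change */

	return ice_aq_set_phy_cfg(hw, pi, cfg, NULL);
}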
3633 
3634 /**
3635  * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3636  * @pi: port information structure
3637  * @caps: PHY ability structure to copy data from
3638  * @cfg: PHY configuration structure to copy data to
3639  *
3640  * Helper function to copy AQC PHY get ability data to PHY set configuration
3641  * data structure
3642  */
3643 void
3644 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3645 			 struct ice_aqc_get_phy_caps_data *caps,
3646 			 struct ice_aqc_set_phy_cfg_data *cfg)
3647 {
3648 	if (!pi || !caps || !cfg)
3649 		return;
3650 
3651 	ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
3652 	cfg->phy_type_low = caps->phy_type_low;
3653 	cfg->phy_type_high = caps->phy_type_high;
3654 	cfg->caps = caps->caps;
3655 	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3656 	cfg->eee_cap = caps->eee_cap;
3657 	cfg->eeer_value = caps->eeer_value;
3658 	cfg->link_fec_opt = caps->link_fec_options;
3659 	cfg->module_compliance_enforcement =
3660 		caps->module_compliance_enforcement;
3661 }
3662 
3663 /**
3664  * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
3665  * @pi: port information structure
3666  * @cfg: PHY configuration data to set FEC mode
3667  * @fec: FEC mode to configure
3668  */
3669 enum ice_status
3670 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3671 		enum ice_fec_mode fec)
3672 {
3673 	struct ice_aqc_get_phy_caps_data *pcaps;
3674 	enum ice_status status = ICE_SUCCESS;
3675 	struct ice_hw *hw;
3676 
3677 	if (!pi || !cfg)
3678 		return ICE_ERR_BAD_PTR;
3679 
3680 	hw = pi->hw;
3681 
3682 	pcaps = (struct ice_aqc_get_phy_caps_data *)
3683 		ice_malloc(hw, sizeof(*pcaps));
3684 	if (!pcaps)
3685 		return ICE_ERR_NO_MEMORY;
3686 
3687 	status = ice_aq_get_phy_caps(pi, false,
3688 				     (ice_fw_supports_report_dflt_cfg(hw) ?
3689 				      ICE_AQC_REPORT_DFLT_CFG :
3690 				      ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
3691 
3692 	if (status)
3693 		goto out;
3694 
3695 	cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
3696 	cfg->link_fec_opt = pcaps->link_fec_options;
3697 
3698 	switch (fec) {
3699 	case ICE_FEC_BASER:
3700 		/* Keep only the BASE-R ability bits (clearing RS),
3701 		 * then OR in the BASE-R request bits.
3702 		 */
3703 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3704 			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3705 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3706 			ICE_AQC_PHY_FEC_25G_KR_REQ;
3707 		break;
3708 	case ICE_FEC_RS:
3709 		/* Keep only the RS ability bit (clearing BASE-R),
3710 		 * then OR in the RS request bits.
3711 		 */
3712 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3713 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3714 			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3715 		break;
3716 	case ICE_FEC_NONE:
3717 		/* Clear all FEC option bits. */
3718 		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3719 		break;
3720 	case ICE_FEC_DIS_AUTO:
3721 		/* Set No FEC and auto FEC */
3722 		/* Set No FEC and auto FEC; jump to out so pcaps is freed */
3723 		if (!ice_fw_supports_fec_dis_auto(hw)) {
3724 			status = ICE_ERR_NOT_SUPPORTED; goto out; }
3725 		/* fall-through */
3726 	case ICE_FEC_AUTO:
3727 		/* AND auto FEC bit, and all caps bits. */
3728 		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3729 		cfg->link_fec_opt |= pcaps->link_fec_options;
3730 		break;
3731 	default:
3732 		status = ICE_ERR_PARAM;
3733 		break;
3734 	}
3735 
3736 	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw) &&
3737 	    !ice_fw_supports_report_dflt_cfg(pi->hw)) {
3738 		struct ice_link_default_override_tlv tlv;
3739 
3740 		if (ice_get_link_default_override(&tlv, pi))
3741 			goto out;
3742 
3743 		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3744 		    (tlv.options & ICE_LINK_OVERRIDE_EN))
3745 			cfg->link_fec_opt = tlv.fec_options;
3746 	}
3747 
3748 out:
3749 	ice_free(hw, pcaps);
3750 
3751 	return status;
3752 }
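
/*
 * Usage sketch (hypothetical helper, not driver code): forcing RS-FEC on
 * a port.  The sequence mirrors what ice_set_fc() does for pause
 * parameters: get the active caps, copy them into a set-config structure,
 * adjust the FEC bits, then send set-PHY-config with an automatic link
 * update.
 */
static enum ice_status
ice_example_force_rs_fec(struct ice_port_info *pi)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	struct ice_hw *hw = pi->hw;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     pcaps, NULL);
	if (!status) {
		ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
		status = ice_cfg_phy_fec(pi, &cfg, ICE_FEC_RS);
	}
	if (!status) {
		cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
	}

	ice_free(hw, pcaps);
	return status;
}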
3753 
3754 /**
3755  * ice_get_link_status - get status of the HW network link
3756  * @pi: port information structure
3757  * @link_up: pointer to bool (true/false = linkup/linkdown)
3758  *
3759  * Variable link_up is true if link is up, false if link is down.
3760  * The variable link_up is invalid if status is non-zero. As a
3761  * result of this call, link status reporting becomes enabled.
3762  */
3763 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3764 {
3765 	struct ice_phy_info *phy_info;
3766 	enum ice_status status = ICE_SUCCESS;
3767 
3768 	if (!pi || !link_up)
3769 		return ICE_ERR_PARAM;
3770 
3771 	phy_info = &pi->phy;
3772 
3773 	if (phy_info->get_link_info) {
3774 		status = ice_update_link_info(pi);
3775 
3776 		if (status)
3777 			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
3778 				  status);
3779 	}
3780 
3781 	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3782 
3783 	return status;
3784 }
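
/*
 * Usage sketch (hypothetical helper): a one-shot link check.  Per the
 * note above, the first query also enables link status reporting.
 */
static bool ice_example_link_is_up(struct ice_port_info *pi)
{
	bool link_up = false;

	if (ice_get_link_status(pi, &link_up))
		return false;	/* treat a failed query as link down */

	return link_up;
}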
3785 
3786 /**
3787  * ice_aq_set_link_restart_an
3788  * @pi: pointer to the port information structure
3789  * @ena_link: if true: enable link, if false: disable link
3790  * @cd: pointer to command details structure or NULL
3791  *
3792  * Sets up the link and restarts the Auto-Negotiation over the link.
3793  */
3794 enum ice_status
3795 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3796 			   struct ice_sq_cd *cd)
3797 {
3798 	struct ice_aqc_restart_an *cmd;
3799 	struct ice_aq_desc desc;
3800 
3801 	cmd = &desc.params.restart_an;
3802 
3803 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3804 
3805 	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3806 	cmd->lport_num = pi->lport;
3807 	if (ena_link)
3808 		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3809 	else
3810 		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3811 
3812 	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3813 }
3814 
3815 /**
3816  * ice_aq_set_event_mask
3817  * @hw: pointer to the HW struct
3818  * @port_num: port number of the physical function
3819  * @mask: event mask to be set
3820  * @cd: pointer to command details structure or NULL
3821  *
3822  * Set event mask (0x0613)
3823  */
3824 enum ice_status
3825 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3826 		      struct ice_sq_cd *cd)
3827 {
3828 	struct ice_aqc_set_event_mask *cmd;
3829 	struct ice_aq_desc desc;
3830 
3831 	cmd = &desc.params.set_event_mask;
3832 
3833 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3834 
3835 	cmd->lport_num = port_num;
3836 
3837 	cmd->event_mask = CPU_TO_LE16(mask);
3838 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3839 }
3840 
3841 /**
3842  * ice_aq_set_mac_loopback
3843  * @hw: pointer to the HW struct
3844  * @ena_lpbk: Enable or Disable loopback
3845  * @cd: pointer to command details structure or NULL
3846  *
3847  * Enable/disable loopback on a given port
3848  */
3849 enum ice_status
3850 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3851 {
3852 	struct ice_aqc_set_mac_lb *cmd;
3853 	struct ice_aq_desc desc;
3854 
3855 	cmd = &desc.params.set_mac_lb;
3856 
3857 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3858 	if (ena_lpbk)
3859 		cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3860 
3861 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3862 }
3863 
3864 /**
3865  * ice_aq_set_port_id_led
3866  * @pi: pointer to the port information
3867  * @is_orig_mode: is this LED set to original mode (by the net-list)
3868  * @cd: pointer to command details structure or NULL
3869  *
3870  * Set LED value for the given port (0x06e9)
3871  */
3872 enum ice_status
3873 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3874 		       struct ice_sq_cd *cd)
3875 {
3876 	struct ice_aqc_set_port_id_led *cmd;
3877 	struct ice_hw *hw = pi->hw;
3878 	struct ice_aq_desc desc;
3879 
3880 	cmd = &desc.params.set_port_id_led;
3881 
3882 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3883 
3884 	if (is_orig_mode)
3885 		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3886 	else
3887 		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3888 
3889 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3890 }
3891 
3892 /**
3893  * ice_aq_sff_eeprom
3894  * @hw: pointer to the HW struct
3895  * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3896  * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
3897  * @mem_addr: I2C offset. Lower 8 bits are the address; upper 8 bits must be zero.
3898  * @page: QSFP page
3899  * @set_page: set or ignore the page
3900  * @data: pointer to data buffer to be read/written to the I2C device.
3901  * @length: 1-16 for read, 1 for write.
3902  * @write: 0 for read, 1 for write.
3903  * @cd: pointer to command details structure or NULL
3904  *
3905  * Read/Write SFF EEPROM (0x06EE)
3906  */
3907 enum ice_status
3908 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3909 		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3910 		  bool write, struct ice_sq_cd *cd)
3911 {
3912 	struct ice_aqc_sff_eeprom *cmd;
3913 	struct ice_aq_desc desc;
3914 	enum ice_status status;
3915 
3916 	if (!data || (mem_addr & 0xff00))
3917 		return ICE_ERR_PARAM;
3918 
3919 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3920 	cmd = &desc.params.read_write_sff_param;
3921 	desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD);
3922 	cmd->lport_num = (u8)(lport & 0xff);
3923 	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3924 	cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
3925 					 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3926 					((set_page <<
3927 					  ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3928 					 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3929 	cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
3930 	cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3931 	if (write)
3932 		cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
3933 
3934 	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
3935 	return status;
3936 }
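
/*
 * Usage sketch (hypothetical helper): reading the one-byte module
 * identifier at offset 0 of a port's SFF EEPROM, using the conventional
 * 0xA0 device address noted above.  set_page is 0, leaving the page
 * register alone (the "set or ignore" flag above).
 */
static enum ice_status
ice_example_read_module_id(struct ice_hw *hw, u8 lport, u8 *id)
{
	/* bit 8 marks the logical port number as valid */
	return ice_aq_sff_eeprom(hw, (u16)(BIT(8) | lport), 0xA0, 0x00, 0, 0,
				 id, 1, false, NULL);
}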
3937 
3938 /**
3939  * ice_aq_prog_topo_dev_nvm
3940  * @hw: pointer to the hardware structure
3941  * @topo_params: pointer to structure storing topology parameters for a device
3942  * @cd: pointer to command details structure or NULL
3943  *
3944  * Program Topology Device NVM (0x06F2)
3946  */
3947 enum ice_status
3948 ice_aq_prog_topo_dev_nvm(struct ice_hw *hw,
3949 			 struct ice_aqc_link_topo_params *topo_params,
3950 			 struct ice_sq_cd *cd)
3951 {
3952 	struct ice_aqc_prog_topo_dev_nvm *cmd;
3953 	struct ice_aq_desc desc;
3954 
3955 	cmd = &desc.params.prog_topo_dev_nvm;
3956 
3957 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_prog_topo_dev_nvm);
3958 
3959 	ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
3960 		   ICE_NONDMA_TO_NONDMA);
3961 
3962 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3963 }
3964 
3965 /**
3966  * ice_aq_read_topo_dev_nvm
3967  * @hw: pointer to the hardware structure
3968  * @topo_params: pointer to structure storing topology parameters for a device
3969  * @start_address: byte offset in the topology device NVM
3970  * @data: pointer to data buffer
3971  * @data_size: number of bytes to be read from the topology device NVM
3972  * @cd: pointer to command details structure or NULL
3973  *
3974  * Read Topology Device NVM (0x06F3)
3975  */
3976 enum ice_status
3977 ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
3978 			 struct ice_aqc_link_topo_params *topo_params,
3979 			 u32 start_address, u8 *data, u8 data_size,
3980 			 struct ice_sq_cd *cd)
3981 {
3982 	struct ice_aqc_read_topo_dev_nvm *cmd;
3983 	struct ice_aq_desc desc;
3984 	enum ice_status status;
3985 
3986 	if (!data || data_size == 0 ||
3987 	    data_size > ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE)
3988 		return ICE_ERR_PARAM;
3989 
3990 	cmd = &desc.params.read_topo_dev_nvm;
3991 
3992 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_topo_dev_nvm);
3993 
3994 	desc.datalen = CPU_TO_LE16(data_size);
3995 	ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
3996 		   ICE_NONDMA_TO_NONDMA);
3997 	cmd->start_address = CPU_TO_LE32(start_address);
3998 
3999 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4000 	if (status)
4001 		return status;
4002 
4003 	ice_memcpy(data, cmd->data_read, data_size, ICE_NONDMA_TO_NONDMA);
4004 
4005 	return ICE_SUCCESS;
4006 }
4007 
4008 /**
4009  * __ice_aq_get_set_rss_lut
4010  * @hw: pointer to the hardware structure
4011  * @params: RSS LUT parameters
4012  * @set: set true to set the table, false to get the table
4013  *
4014  * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
4015  */
4016 static enum ice_status
4017 __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
4018 {
4019 	u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
4020 	struct ice_aqc_get_set_rss_lut *cmd_resp;
4021 	struct ice_aq_desc desc;
4022 	enum ice_status status;
4023 	u8 *lut;
4024 
4025 	if (!params)
4026 		return ICE_ERR_PARAM;
4027 
4028 	vsi_handle = params->vsi_handle;
4029 	lut = params->lut;
4030 
4031 	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
4032 		return ICE_ERR_PARAM;
4033 
4034 	lut_size = params->lut_size;
4035 	lut_type = params->lut_type;
4036 	glob_lut_idx = params->global_lut_id;
4037 	vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4038 
4039 	cmd_resp = &desc.params.get_set_rss_lut;
4040 
4041 	if (set) {
4042 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
4043 		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4044 	} else {
4045 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
4046 	}
4047 
4048 	cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
4049 					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
4050 					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
4051 				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);
4052 
4053 	switch (lut_type) {
4054 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
4055 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
4056 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
4057 		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
4058 			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
4059 		break;
4060 	default:
4061 		status = ICE_ERR_PARAM;
4062 		goto ice_aq_get_set_rss_lut_exit;
4063 	}
4064 
4065 	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
4066 		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
4067 			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
4068 
4069 		if (!set)
4070 			goto ice_aq_get_set_rss_lut_send;
4071 	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
4072 		if (!set)
4073 			goto ice_aq_get_set_rss_lut_send;
4074 	} else {
4075 		goto ice_aq_get_set_rss_lut_send;
4076 	}
4077 
4078 	/* LUT size is only valid for Global and PF table types */
4079 	switch (lut_size) {
4080 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
4081 		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
4082 			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
4083 			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
4084 		break;
4085 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
4086 		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
4087 			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
4088 			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
4089 		break;
4090 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
4091 		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
4092 			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
4093 				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
4094 				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
4095 			break;
4096 		}
4097 		/* fall-through */
4098 	default:
4099 		status = ICE_ERR_PARAM;
4100 		goto ice_aq_get_set_rss_lut_exit;
4101 	}
4102 
4103 ice_aq_get_set_rss_lut_send:
4104 	cmd_resp->flags = CPU_TO_LE16(flags);
4105 	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
4106 
4107 ice_aq_get_set_rss_lut_exit:
4108 	return status;
4109 }
4110 
4111 /**
4112  * ice_aq_get_rss_lut
4113  * @hw: pointer to the hardware structure
4114  * @get_params: RSS LUT parameters used to specify which RSS LUT to get
4115  *
4116  * get the RSS lookup table, PF or VSI type
4117  */
4118 enum ice_status
4119 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
4120 {
4121 	return __ice_aq_get_set_rss_lut(hw, get_params, false);
4122 }
4123 
4124 /**
4125  * ice_aq_set_rss_lut
4126  * @hw: pointer to the hardware structure
4127  * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
4128  *
4129  * set the RSS lookup table, PF or VSI type
4130  */
4131 enum ice_status
4132 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
4133 {
4134 	return __ice_aq_get_set_rss_lut(hw, set_params, true);
4135 }
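
/*
 * Usage sketch (hypothetical helper): programming a 512-entry PF lookup
 * table that spreads flows round-robin over num_qs queues.  This assumes
 * ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 is the table's byte count, as its
 * use as the AQ buffer length above implies.
 */
static enum ice_status
ice_example_set_pf_lut(struct ice_hw *hw, u16 vsi_handle, u16 num_qs)
{
	struct ice_aq_get_set_rss_lut_params params = { 0 };
	u8 lut[ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512];
	u16 i;

	if (!num_qs)
		return ICE_ERR_PARAM;

	for (i = 0; i < ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512; i++)
		lut[i] = (u8)(i % num_qs);

	params.vsi_handle = vsi_handle;
	params.lut = lut;
	params.lut_size = ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512;
	params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;

	return ice_aq_set_rss_lut(hw, &params);
}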
4136 
4137 /**
4138  * __ice_aq_get_set_rss_key
4139  * @hw: pointer to the HW struct
4140  * @vsi_id: VSI FW index
4141  * @key: pointer to key info struct
4142  * @set: set true to set the key, false to get the key
4143  *
4144  * get (0x0B04) or set (0x0B02) the RSS key per VSI
4145  */
4146 static enum ice_status
4147 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
4148 				    struct ice_aqc_get_set_rss_keys *key,
4149 				    bool set)
4150 {
4151 	struct ice_aqc_get_set_rss_key *cmd_resp;
4152 	u16 key_size = sizeof(*key);
4153 	struct ice_aq_desc desc;
4154 
4155 	cmd_resp = &desc.params.get_set_rss_key;
4156 
4157 	if (set) {
4158 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
4159 		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4160 	} else {
4161 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
4162 	}
4163 
4164 	cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
4165 					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
4166 					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
4167 				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);
4168 
4169 	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
4170 }
4171 
4172 /**
4173  * ice_aq_get_rss_key
4174  * @hw: pointer to the HW struct
4175  * @vsi_handle: software VSI handle
4176  * @key: pointer to key info struct
4177  *
4178  * get the RSS key per VSI
4179  */
4180 enum ice_status
4181 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
4182 		   struct ice_aqc_get_set_rss_keys *key)
4183 {
4184 	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
4185 		return ICE_ERR_PARAM;
4186 
4187 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
4188 					key, false);
4189 }
4190 
4191 /**
4192  * ice_aq_set_rss_key
4193  * @hw: pointer to the HW struct
4194  * @vsi_handle: software VSI handle
4195  * @keys: pointer to key info struct
4196  *
4197  * set the RSS key per VSI
4198  */
4199 enum ice_status
4200 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
4201 		   struct ice_aqc_get_set_rss_keys *keys)
4202 {
4203 	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
4204 		return ICE_ERR_PARAM;
4205 
4206 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
4207 					keys, true);
4208 }
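
/*
 * Sketch (hypothetical helper): reading a VSI's RSS key and writing it
 * back unchanged.  The round trip exercises both wrappers without
 * assuming anything about the layout of struct ice_aqc_get_set_rss_keys.
 */
static enum ice_status
ice_example_rss_key_roundtrip(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_aqc_get_set_rss_keys keys;
	enum ice_status status;

	status = ice_aq_get_rss_key(hw, vsi_handle, &keys);
	if (status)
		return status;

	return ice_aq_set_rss_key(hw, vsi_handle, &keys);
}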
4209 
4210 /**
4211  * ice_aq_add_lan_txq
4212  * @hw: pointer to the hardware structure
4213  * @num_qgrps: Number of added queue groups
4214  * @qg_list: list of queue groups to be added
4215  * @buf_size: size of buffer for indirect command
4216  * @cd: pointer to command details structure or NULL
4217  *
4218  * Add Tx LAN queue (0x0C30)
4219  *
4220  * NOTE:
4221  * Prior to calling add Tx LAN queue:
4222  * Initialize the following as part of the Tx queue context:
4223  * Completion queue ID (if the queue uses a completion queue), Quanta
4224  * profile, Cache profile, and Packet shaper profile.
4225  *
4226  * After add Tx LAN queue AQ command is completed:
4227  * Interrupts should be associated with specific queues,
4228  * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
4229  * flow.
4230  */
4231 enum ice_status
4232 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
4233 		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
4234 		   struct ice_sq_cd *cd)
4235 {
4236 	struct ice_aqc_add_tx_qgrp *list;
4237 	struct ice_aqc_add_txqs *cmd;
4238 	struct ice_aq_desc desc;
4239 	u16 i, sum_size = 0;
4240 
4241 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4242 
4243 	cmd = &desc.params.add_txqs;
4244 
4245 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
4246 
4247 	if (!qg_list)
4248 		return ICE_ERR_PARAM;
4249 
4250 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
4251 		return ICE_ERR_PARAM;
4252 
4253 	for (i = 0, list = qg_list; i < num_qgrps; i++) {
4254 		sum_size += ice_struct_size(list, txqs, list->num_txqs);
4255 		list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
4256 						      list->num_txqs);
4257 	}
4258 
4259 	if (buf_size != sum_size)
4260 		return ICE_ERR_PARAM;
4261 
4262 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4263 
4264 	cmd->num_qgrps = num_qgrps;
4265 
4266 	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
4267 }
4268 
4269 /**
4270  * ice_aq_dis_lan_txq
4271  * @hw: pointer to the hardware structure
4272  * @num_qgrps: number of groups in the list
4273  * @qg_list: the list of groups to disable
4274  * @buf_size: the total size of the qg_list buffer in bytes
4275  * @rst_src: if called due to reset, specifies the reset source
4276  * @vmvf_num: the relative VM or VF number that is undergoing the reset
4277  * @cd: pointer to command details structure or NULL
4278  *
4279  * Disable LAN Tx queue (0x0C31)
4280  */
4281 static enum ice_status
4282 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
4283 		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
4284 		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
4285 		   struct ice_sq_cd *cd)
4286 {
4287 	struct ice_aqc_dis_txq_item *item;
4288 	struct ice_aqc_dis_txqs *cmd;
4289 	struct ice_aq_desc desc;
4290 	enum ice_status status;
4291 	u16 i, sz = 0;
4292 
4293 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4294 	cmd = &desc.params.dis_txqs;
4295 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
4296 
4297 	/* qg_list can be NULL only in VM/VF reset flow */
4298 	if (!qg_list && !rst_src)
4299 		return ICE_ERR_PARAM;
4300 
4301 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
4302 		return ICE_ERR_PARAM;
4303 
4304 	cmd->num_entries = num_qgrps;
4305 
4306 	cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
4307 					    ICE_AQC_Q_DIS_TIMEOUT_M);
4308 
4309 	switch (rst_src) {
4310 	case ICE_VM_RESET:
4311 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
4312 		cmd->vmvf_and_timeout |=
4313 			CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
4314 		break;
4315 	case ICE_VF_RESET:
4316 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
4317 		/* In this case, FW expects vmvf_num to be absolute VF ID */
4318 		cmd->vmvf_and_timeout |=
4319 			CPU_TO_LE16((vmvf_num + hw->func_caps.vf_base_id) &
4320 				    ICE_AQC_Q_DIS_VMVF_NUM_M);
4321 		break;
4322 	case ICE_NO_RESET:
4323 	default:
4324 		break;
4325 	}
4326 
4327 	/* flush pipe on time out */
4328 	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
4329 	/* If no queue group info, we are in a reset flow. Issue the AQ */
4330 	if (!qg_list)
4331 		goto do_aq;
4332 
4333 	/* set RD bit to indicate that command buffer is provided by the driver
4334 	 * and it needs to be read by the firmware
4335 	 */
4336 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4337 
4338 	for (i = 0, item = qg_list; i < num_qgrps; i++) {
4339 		u16 item_size = ice_struct_size(item, q_id, item->num_qs);
4340 
4341 		/* If the num of queues is even, add 2 bytes of padding */
4342 		if ((item->num_qs % 2) == 0)
4343 			item_size += 2;
4344 
4345 		sz += item_size;
4346 
4347 		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
4348 	}
4349 
4350 	if (buf_size != sz)
4351 		return ICE_ERR_PARAM;
4352 
4353 do_aq:
4354 	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
4355 	if (status) {
4356 		if (!qg_list)
4357 			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
4358 				  vmvf_num, hw->adminq.sq_last_status);
4359 		else
4360 			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
4361 				  LE16_TO_CPU(qg_list[0].q_id[0]),
4362 				  hw->adminq.sq_last_status);
4363 	}
4364 	return status;
4365 }
4366 
4367 /**
4368  * ice_aq_move_recfg_lan_txq
4369  * @hw: pointer to the hardware structure
4370  * @num_qs: number of queues to move/reconfigure
4371  * @is_move: true if this operation involves node movement
4372  * @is_tc_change: true if this operation involves a TC change
4373  * @subseq_call: true if this operation is a subsequent call
4374  * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
4375  * @timeout: timeout in units of 100 usec (valid values 0-50)
4376  * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
4377  * @buf: struct containing src/dest TEID and per-queue info
4378  * @buf_size: size of buffer for indirect command
4379  * @txqs_moved: out param, number of queues successfully moved
4380  * @cd: pointer to command details structure or NULL
4381  *
4382  * Move / Reconfigure Tx LAN queues (0x0C32)
4383  */
4384 enum ice_status
4385 ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
4386 			  bool is_tc_change, bool subseq_call, bool flush_pipe,
4387 			  u8 timeout, u32 *blocked_cgds,
4388 			  struct ice_aqc_move_txqs_data *buf, u16 buf_size,
4389 			  u8 *txqs_moved, struct ice_sq_cd *cd)
4390 {
4391 	struct ice_aqc_move_txqs *cmd;
4392 	struct ice_aq_desc desc;
4393 	enum ice_status status;
4394 
4395 	cmd = &desc.params.move_txqs;
4396 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
4397 
4398 #define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
4399 	if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
4400 		return ICE_ERR_PARAM;
4401 
4402 	if (is_tc_change && !flush_pipe && !blocked_cgds)
4403 		return ICE_ERR_PARAM;
4404 
4405 	if (!is_move && !is_tc_change)
4406 		return ICE_ERR_PARAM;
4407 
4408 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4409 
4410 	if (is_move)
4411 		cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;
4412 
4413 	if (is_tc_change)
4414 		cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;
4415 
4416 	if (subseq_call)
4417 		cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;
4418 
4419 	if (flush_pipe)
4420 		cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;
4421 
4422 	cmd->num_qs = num_qs;
4423 	cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
4424 			ICE_AQC_Q_CMD_TIMEOUT_M);
4425 
4426 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4427 
4428 	if (!status && txqs_moved)
4429 		*txqs_moved = cmd->num_qs;
4430 
4431 	if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
4432 	    is_tc_change && !flush_pipe)
4433 		*blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);
4434 
4435 	return status;
4436 }
4437 
4438 /**
4439  * ice_aq_add_rdma_qsets
4440  * @hw: pointer to the hardware structure
4441  * @num_qset_grps: Number of RDMA Qset groups
4442  * @qset_list: list of qset groups to be added
4443  * @buf_size: size of buffer for indirect command
4444  * @cd: pointer to command details structure or NULL
4445  *
4446  * Add Tx RDMA Qsets (0x0C33)
4447  */
4448 enum ice_status
4449 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
4450 		      struct ice_aqc_add_rdma_qset_data *qset_list,
4451 		      u16 buf_size, struct ice_sq_cd *cd)
4452 {
4453 	struct ice_aqc_add_rdma_qset_data *list;
4454 	struct ice_aqc_add_rdma_qset *cmd;
4455 	struct ice_aq_desc desc;
4456 	u16 i, sum_size = 0;
4457 
4458 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4459 
4460 	cmd = &desc.params.add_rdma_qset;
4461 
4462 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);
4463 
4464 	if (!qset_list)
4465 		return ICE_ERR_PARAM;
4466 
4467 	if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
4468 		return ICE_ERR_PARAM;
4469 
4470 	for (i = 0, list = qset_list; i < num_qset_grps; i++) {
4471 		u16 num_qsets = LE16_TO_CPU(list->num_qsets);
4472 
4473 		sum_size += ice_struct_size(list, rdma_qsets, num_qsets);
4474 		list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
4475 							     num_qsets);
4476 	}
4477 
4478 	if (buf_size != sum_size)
4479 		return ICE_ERR_PARAM;
4480 
4481 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4482 
4483 	cmd->num_qset_grps = num_qset_grps;
4484 
4485 	return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
4486 }
4487 
4488 /* End of FW Admin Queue command wrappers */
4489 
4490 /**
4491  * ice_write_byte - write a byte to a packed context structure
4492  * @src_ctx:  the context structure to read from
4493  * @dest_ctx: the context to be written to
4494  * @ce_info:  a description of the struct to be filled
4495  */
4496 static void
4497 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4498 {
4499 	u8 src_byte, dest_byte, mask;
4500 	u8 *from, *dest;
4501 	u16 shift_width;
4502 
4503 	/* copy from the next struct field */
4504 	from = src_ctx + ce_info->offset;
4505 
4506 	/* prepare the bits and mask */
4507 	shift_width = ce_info->lsb % 8;
4508 	mask = (u8)(BIT(ce_info->width) - 1);
4509 
4510 	src_byte = *from;
4511 	src_byte &= mask;
4512 
4513 	/* shift to correct alignment */
4514 	mask <<= shift_width;
4515 	src_byte <<= shift_width;
4516 
4517 	/* get the current bits from the target bit string */
4518 	dest = dest_ctx + (ce_info->lsb / 8);
4519 
4520 	ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
4521 
4522 	dest_byte &= ~mask;	/* get the bits not changing */
4523 	dest_byte |= src_byte;	/* add in the new bits */
4524 
4525 	/* put it all back */
4526 	ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
4527 }
4528 
4529 /**
4530  * ice_write_word - write a word to a packed context structure
4531  * @src_ctx:  the context structure to read from
4532  * @dest_ctx: the context to be written to
4533  * @ce_info:  a description of the struct to be filled
4534  */
4535 static void
4536 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4537 {
4538 	u16 src_word, mask;
4539 	__le16 dest_word;
4540 	u8 *from, *dest;
4541 	u16 shift_width;
4542 
4543 	/* copy from the next struct field */
4544 	from = src_ctx + ce_info->offset;
4545 
4546 	/* prepare the bits and mask */
4547 	shift_width = ce_info->lsb % 8;
4548 	mask = BIT(ce_info->width) - 1;
4549 
4550 	/* don't swizzle the bits until after the mask because the mask bits
4551 	 * will be in a different bit position on big endian machines
4552 	 */
4553 	src_word = *(u16 *)from;
4554 	src_word &= mask;
4555 
4556 	/* shift to correct alignment */
4557 	mask <<= shift_width;
4558 	src_word <<= shift_width;
4559 
4560 	/* get the current bits from the target bit string */
4561 	dest = dest_ctx + (ce_info->lsb / 8);
4562 
4563 	ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
4564 
4565 	dest_word &= ~(CPU_TO_LE16(mask));	/* get the bits not changing */
4566 	dest_word |= CPU_TO_LE16(src_word);	/* add in the new bits */
4567 
4568 	/* put it all back */
4569 	ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
4570 }
4571 
4572 /**
4573  * ice_write_dword - write a dword to a packed context structure
4574  * @src_ctx:  the context structure to read from
4575  * @dest_ctx: the context to be written to
4576  * @ce_info:  a description of the struct to be filled
4577  */
4578 static void
4579 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4580 {
4581 	u32 src_dword, mask;
4582 	__le32 dest_dword;
4583 	u8 *from, *dest;
4584 	u16 shift_width;
4585 
4586 	/* copy from the next struct field */
4587 	from = src_ctx + ce_info->offset;
4588 
4589 	/* prepare the bits and mask */
4590 	shift_width = ce_info->lsb % 8;
4591 
4592 	/* if the field width is exactly 32 on an x86 machine, then the shift
4593 	 * operation will not work because the SHL instruction's count is masked
4594 	 * to 5 bits so the shift will do nothing
4595 	 */
4596 	if (ce_info->width < 32)
4597 		mask = BIT(ce_info->width) - 1;
4598 	else
4599 		mask = (u32)~0;
4600 
4601 	/* don't swizzle the bits until after the mask because the mask bits
4602 	 * will be in a different bit position on big endian machines
4603 	 */
4604 	src_dword = *(u32 *)from;
4605 	src_dword &= mask;
4606 
4607 	/* shift to correct alignment */
4608 	mask <<= shift_width;
4609 	src_dword <<= shift_width;
4610 
4611 	/* get the current bits from the target bit string */
4612 	dest = dest_ctx + (ce_info->lsb / 8);
4613 
4614 	ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
4615 
4616 	dest_dword &= ~(CPU_TO_LE32(mask));	/* get the bits not changing */
4617 	dest_dword |= CPU_TO_LE32(src_dword);	/* add in the new bits */
4618 
4619 	/* put it all back */
4620 	ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
4621 }
4622 
4623 /**
4624  * ice_write_qword - write a qword to a packed context structure
4625  * @src_ctx:  the context structure to read from
4626  * @dest_ctx: the context to be written to
4627  * @ce_info:  a description of the struct to be filled
4628  */
4629 static void
4630 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4631 {
4632 	u64 src_qword, mask;
4633 	__le64 dest_qword;
4634 	u8 *from, *dest;
4635 	u16 shift_width;
4636 
4637 	/* copy from the next struct field */
4638 	from = src_ctx + ce_info->offset;
4639 
4640 	/* prepare the bits and mask */
4641 	shift_width = ce_info->lsb % 8;
4642 
4643 	/* if the field width is exactly 64 on an x86 machine, then the shift
4644 	 * operation will not work because the SHL instruction's count is masked
4645 	 * to 6 bits so the shift will do nothing
4646 	 */
4647 	if (ce_info->width < 64)
4648 		mask = BIT_ULL(ce_info->width) - 1;
4649 	else
4650 		mask = (u64)~0;
4651 
4652 	/* don't swizzle the bits until after the mask because the mask bits
4653 	 * will be in a different bit position on big endian machines
4654 	 */
4655 	src_qword = *(u64 *)from;
4656 	src_qword &= mask;
4657 
4658 	/* shift to correct alignment */
4659 	mask <<= shift_width;
4660 	src_qword <<= shift_width;
4661 
4662 	/* get the current bits from the target bit string */
4663 	dest = dest_ctx + (ce_info->lsb / 8);
4664 
4665 	ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
4666 
4667 	dest_qword &= ~(CPU_TO_LE64(mask));	/* get the bits not changing */
4668 	dest_qword |= CPU_TO_LE64(src_qword);	/* add in the new bits */
4669 
4670 	/* put it all back */
4671 	ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
4672 }
4673 
4674 /**
4675  * ice_set_ctx - set context bits in packed structure
4676  * @hw: pointer to the hardware structure
4677  * @src_ctx:  pointer to a generic non-packed context structure
4678  * @dest_ctx: pointer to memory for the packed structure
4679  * @ce_info:  a description of the structure to be transformed
4680  */
4681 enum ice_status
4682 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
4683 	    const struct ice_ctx_ele *ce_info)
4684 {
4685 	int f;
4686 
4687 	for (f = 0; ce_info[f].width; f++) {
4688 		/* We have to deal with each element of the FW response
4689 		 * using the correct size so that we are correct regardless
4690 		 * of the endianness of the machine.
4691 		 */
4692 		if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
4693 			ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
4694 				  f, ce_info[f].width, ce_info[f].size_of);
4695 			continue;
4696 		}
4697 		switch (ce_info[f].size_of) {
4698 		case sizeof(u8):
4699 			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
4700 			break;
4701 		case sizeof(u16):
4702 			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
4703 			break;
4704 		case sizeof(u32):
4705 			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
4706 			break;
4707 		case sizeof(u64):
4708 			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
4709 			break;
4710 		default:
4711 			return ICE_ERR_INVAL_SIZE;
4712 		}
4713 	}
4714 
4715 	return ICE_SUCCESS;
4716 }
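
/*
 * Sketch (illustration only): a hypothetical two-field descriptor table
 * for ice_set_ctx().  Only the four ice_ctx_ele members the code above
 * reads (offset, size_of, width, lsb) are assumed; the driver's real
 * queue-context tables are built elsewhere with the ICE_CTX_STORE()
 * macro.  The width == 0 entry terminates the ice_set_ctx() loop, and
 * dest must hold at least two bytes here (bits 0..13).
 */
struct ice_example_ctx {
	u8 ena;		/* 1-bit enable flag at bit 0 */
	u16 qlen;	/* 13-bit queue length at bits 1..13 */
};

static const struct ice_ctx_ele ice_example_ctx_info[] = {
	{ .offset = offsetof(struct ice_example_ctx, ena),
	  .size_of = sizeof(u8), .width = 1, .lsb = 0 },
	{ .offset = offsetof(struct ice_example_ctx, qlen),
	  .size_of = sizeof(u16), .width = 13, .lsb = 1 },
	{ .width = 0 }	/* terminator */
};

static enum ice_status
ice_example_pack(struct ice_hw *hw, struct ice_example_ctx *src, u8 *dest)
{
	return ice_set_ctx(hw, (u8 *)src, dest, ice_example_ctx_info);
}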
4717 
4718 /**
4719  * ice_aq_get_internal_data
4720  * @hw: pointer to the hardware structure
4721  * @cluster_id: specific cluster to dump
4722  * @table_id: table ID within cluster
4723  * @start: index of line in the block to read
4724  * @buf: dump buffer
4725  * @buf_size: dump buffer size
4726  * @ret_buf_size: return buffer size (returned by FW)
4727  * @ret_next_table: next block to read (returned by FW)
4728  * @ret_next_index: next index to read (returned by FW)
4729  * @cd: pointer to command details structure
4730  *
4731  * Get internal FW/HW data (0xFF08) for debug purposes.
4732  */
4733 enum ice_status
4734 ice_aq_get_internal_data(struct ice_hw *hw, u8 cluster_id, u16 table_id,
4735 			 u32 start, void *buf, u16 buf_size, u16 *ret_buf_size,
4736 			 u16 *ret_next_table, u32 *ret_next_index,
4737 			 struct ice_sq_cd *cd)
4738 {
4739 	struct ice_aqc_debug_dump_internals *cmd;
4740 	struct ice_aq_desc desc;
4741 	enum ice_status status;
4742 
4743 	cmd = &desc.params.debug_dump;
4744 
4745 	if (buf_size == 0 || !buf)
4746 		return ICE_ERR_PARAM;
4747 
4748 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_debug_dump_internals);
4749 
4750 	cmd->cluster_id = cluster_id;
4751 	cmd->table_id = CPU_TO_LE16(table_id);
4752 	cmd->idx = CPU_TO_LE32(start);
4753 
4754 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4755 
4756 	if (!status) {
4757 		if (ret_buf_size)
4758 			*ret_buf_size = LE16_TO_CPU(desc.datalen);
4759 		if (ret_next_table)
4760 			*ret_next_table = LE16_TO_CPU(cmd->table_id);
4761 		if (ret_next_index)
4762 			*ret_next_index = LE32_TO_CPU(cmd->idx);
4763 	}
4764 
4765 	return status;
4766 }
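
/*
 * Usage sketch (hypothetical helper): a single debug-dump request for
 * cluster 0, table 0, starting at index 0 (arbitrary example
 * coordinates).  The returned next_table/next_index tell the caller what
 * to request next when walking a larger dump.
 */
static enum ice_status
ice_example_dump_once(struct ice_hw *hw, void *buf, u16 buf_size)
{
	u16 ret_len = 0, next_table = 0;
	u32 next_index = 0;

	return ice_aq_get_internal_data(hw, 0, 0, 0, buf, buf_size,
					&ret_len, &next_table, &next_index,
					NULL);
}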
4767 
4768 /**
4769  * ice_read_byte - read context byte into struct
4770  * @src_ctx:  the context structure to read from
4771  * @dest_ctx: the context to be written to
4772  * @ce_info:  a description of the struct to be filled
4773  */
4774 static void
4775 ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4776 {
4777 	u8 dest_byte, mask;
4778 	u8 *src, *target;
4779 	u16 shift_width;
4780 
4781 	/* prepare the bits and mask */
4782 	shift_width = ce_info->lsb % 8;
4783 	mask = (u8)(BIT(ce_info->width) - 1);
4784 
4785 	/* shift to correct alignment */
4786 	mask <<= shift_width;
4787 
4788 	/* get the current bits from the src bit string */
4789 	src = src_ctx + (ce_info->lsb / 8);
4790 
4791 	ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
4792 
4793 	dest_byte &= mask;
4794 
4795 	dest_byte >>= shift_width;
4796 
4797 	/* get the address from the struct field */
4798 	target = dest_ctx + ce_info->offset;
4799 
4800 	/* put it back in the struct */
4801 	ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
4802 }
4803 
4804 /**
4805  * ice_read_word - read context word into struct
4806  * @src_ctx:  the context structure to read from
4807  * @dest_ctx: the context to be written to
4808  * @ce_info:  a description of the struct to be filled
4809  */
4810 static void
4811 ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4812 {
4813 	u16 dest_word, mask;
4814 	u8 *src, *target;
4815 	__le16 src_word;
4816 	u16 shift_width;
4817 
4818 	/* prepare the bits and mask */
4819 	shift_width = ce_info->lsb % 8;
4820 	mask = BIT(ce_info->width) - 1;
4821 
4822 	/* shift to correct alignment */
4823 	mask <<= shift_width;
4824 
4825 	/* get the current bits from the src bit string */
4826 	src = src_ctx + (ce_info->lsb / 8);
4827 
4828 	ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);
4829 
4830 	/* the data in the memory is stored as little endian so mask it
4831 	 * correctly
4832 	 */
4833 	src_word &= CPU_TO_LE16(mask);
4834 
4835 	/* get the data back into host order before shifting */
4836 	dest_word = LE16_TO_CPU(src_word);
4837 
4838 	dest_word >>= shift_width;
4839 
4840 	/* get the address from the struct field */
4841 	target = dest_ctx + ce_info->offset;
4842 
4843 	/* put it back in the struct */
4844 	ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
4845 }
4846 
4847 /**
4848  * ice_read_dword - read context dword into struct
4849  * @src_ctx:  the context structure to read from
4850  * @dest_ctx: the context to be written to
4851  * @ce_info:  a description of the struct to be filled
4852  */
4853 static void
4854 ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4855 {
4856 	u32 dest_dword, mask;
4857 	__le32 src_dword;
4858 	u8 *src, *target;
4859 	u16 shift_width;
4860 
4861 	/* prepare the bits and mask */
4862 	shift_width = ce_info->lsb % 8;
4863 
4864 	/* if the field width is exactly 32 on an x86 machine, then the shift
4865 	 * operation will not work because the SHL instruction's count is masked
4866 	 * to 5 bits so the shift will do nothing
4867 	 */
4868 	if (ce_info->width < 32)
4869 		mask = BIT(ce_info->width) - 1;
4870 	else
4871 		mask = (u32)~0;
4872 
4873 	/* shift to correct alignment */
4874 	mask <<= shift_width;
4875 
4876 	/* get the current bits from the src bit string */
4877 	src = src_ctx + (ce_info->lsb / 8);
4878 
4879 	ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);
4880 
4881 	/* the data in the memory is stored as little endian so mask it
4882 	 * correctly
4883 	 */
4884 	src_dword &= CPU_TO_LE32(mask);
4885 
4886 	/* get the data back into host order before shifting */
4887 	dest_dword = LE32_TO_CPU(src_dword);
4888 
4889 	dest_dword >>= shift_width;
4890 
4891 	/* get the address from the struct field */
4892 	target = dest_ctx + ce_info->offset;
4893 
4894 	/* put it back in the struct */
4895 	ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
4896 }
4897 
4898 /**
4899  * ice_read_qword - read context qword into struct
4900  * @src_ctx:  the context structure to read from
4901  * @dest_ctx: the context to be written to
4902  * @ce_info:  a description of the struct to be filled
4903  */
4904 static void
4905 ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4906 {
4907 	u64 dest_qword, mask;
4908 	__le64 src_qword;
4909 	u8 *src, *target;
4910 	u16 shift_width;
4911 
4912 	/* prepare the bits and mask */
4913 	shift_width = ce_info->lsb % 8;
4914 
4915 	/* if the field width is exactly 64 on an x86 machine, then the shift
4916 	 * operation will not work because the SHL instruction's count is masked
4917 	 * to 6 bits so the shift will do nothing
4918 	 */
4919 	if (ce_info->width < 64)
4920 		mask = BIT_ULL(ce_info->width) - 1;
4921 	else
4922 		mask = (u64)~0;
4923 
4924 	/* shift to correct alignment */
4925 	mask <<= shift_width;
4926 
4927 	/* get the current bits from the src bit string */
4928 	src = src_ctx + (ce_info->lsb / 8);
4929 
4930 	ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);
4931 
4932 	/* the data in the memory is stored as little endian so mask it
4933 	 * correctly
4934 	 */
4935 	src_qword &= CPU_TO_LE64(mask);
4936 
4937 	/* get the data back into host order before shifting */
4938 	dest_qword = LE64_TO_CPU(src_qword);
4939 
4940 	dest_qword >>= shift_width;
4941 
4942 	/* get the address from the struct field */
4943 	target = dest_ctx + ce_info->offset;
4944 
4945 	/* put it back in the struct */
4946 	ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
4947 }
4948 
4949 /**
4950  * ice_get_ctx - extract context bits from a packed structure
4951  * @src_ctx:  pointer to a generic packed context structure
4952  * @dest_ctx: pointer to a generic non-packed context structure
4953  * @ce_info:  a description of the structure to be read from
4954  */
4955 enum ice_status
4956 ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4957 {
4958 	int f;
4959 
4960 	for (f = 0; ce_info[f].width; f++) {
4961 		switch (ce_info[f].size_of) {
4962 		case 1:
4963 			ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
4964 			break;
4965 		case 2:
4966 			ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
4967 			break;
4968 		case 4:
4969 			ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
4970 			break;
4971 		case 8:
4972 			ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
4973 			break;
4974 		default:
4975 			/* nothing to do, just keep going */
4976 			break;
4977 		}
4978 	}
4979 
4980 	return ICE_SUCCESS;
4981 }
4982 
4983 /**
4984  * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
4985  * @hw: pointer to the HW struct
4986  * @vsi_handle: software VSI handle
4987  * @tc: TC number
4988  * @q_handle: software queue handle
4989  */
4990 struct ice_q_ctx *
4991 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
4992 {
4993 	struct ice_vsi_ctx *vsi;
4994 	struct ice_q_ctx *q_ctx;
4995 
4996 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
4997 	if (!vsi)
4998 		return NULL;
4999 	if (q_handle >= vsi->num_lan_q_entries[tc])
5000 		return NULL;
5001 	if (!vsi->lan_q_ctx[tc])
5002 		return NULL;
5003 	q_ctx = vsi->lan_q_ctx[tc];
5004 	return &q_ctx[q_handle];
5005 }
5006 
5007 /**
5008  * ice_ena_vsi_txq
5009  * @pi: port information structure
5010  * @vsi_handle: software VSI handle
5011  * @tc: TC number
5012  * @q_handle: software queue handle
5013  * @num_qgrps: Number of added queue groups
5014  * @buf: list of queue groups to be added
5015  * @buf_size: size of buffer for indirect command
5016  * @cd: pointer to command details structure or NULL
5017  *
5018  * This function adds one LAN queue
5019  */
5020 enum ice_status
5021 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
5022 		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
5023 		struct ice_sq_cd *cd)
5024 {
5025 	struct ice_aqc_txsched_elem_data node = { 0 };
5026 	struct ice_sched_node *parent;
5027 	struct ice_q_ctx *q_ctx;
5028 	enum ice_status status;
5029 	struct ice_hw *hw;
5030 
5031 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5032 		return ICE_ERR_CFG;
5033 
5034 	if (num_qgrps > 1 || buf->num_txqs > 1)
5035 		return ICE_ERR_MAX_LIMIT;
5036 
5037 	hw = pi->hw;
5038 
5039 	if (!ice_is_vsi_valid(hw, vsi_handle))
5040 		return ICE_ERR_PARAM;
5041 
5042 	ice_acquire_lock(&pi->sched_lock);
5043 
5044 	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
5045 	if (!q_ctx) {
5046 		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
5047 			  q_handle);
5048 		status = ICE_ERR_PARAM;
5049 		goto ena_txq_exit;
5050 	}
5051 
5052 	/* find a parent node */
5053 	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
5054 					    ICE_SCHED_NODE_OWNER_LAN);
5055 	if (!parent) {
5056 		status = ICE_ERR_PARAM;
5057 		goto ena_txq_exit;
5058 	}
5059 
5060 	buf->parent_teid = parent->info.node_teid;
5061 	node.parent_teid = parent->info.node_teid;
5062 	/* Mark the values in the "generic" section as valid. The default
5063 	 * value in the "generic" section is zero. This means that:
5064 	 * - Scheduling mode is Bytes Per Second (BPS), indicated by bit 0.
5065 	 * - Priority among siblings is 0, indicated by bits 1-3.
5066 	 * - WFQ, indicated by bit 4.
5067 	 * - An adjustment value of 0 is used in the PSM credit update flow,
5068 	 *   indicated by bits 5-6.
5069 	 * - Bit 7 is reserved.
5070 	 * Without setting the generic section as valid in valid_sections, the
5071 	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
5072 	 */
5073 	buf->txqs[0].info.valid_sections =
5074 		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
5075 		ICE_AQC_ELEM_VALID_EIR;
5076 	buf->txqs[0].info.generic = 0;
5077 	buf->txqs[0].info.cir_bw.bw_profile_idx =
5078 		CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5079 	buf->txqs[0].info.cir_bw.bw_alloc =
5080 		CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5081 	buf->txqs[0].info.eir_bw.bw_profile_idx =
5082 		CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5083 	buf->txqs[0].info.eir_bw.bw_alloc =
5084 		CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5085 
5086 	/* add the LAN queue */
5087 	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
5088 	if (status != ICE_SUCCESS) {
5089 		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
5090 			  LE16_TO_CPU(buf->txqs[0].txq_id),
5091 			  hw->adminq.sq_last_status);
5092 		goto ena_txq_exit;
5093 	}
5094 
5095 	node.node_teid = buf->txqs[0].q_teid;
5096 	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
5097 	q_ctx->q_handle = q_handle;
5098 	q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
5099 
5100 	/* add a leaf node into scheduler tree queue layer */
5101 	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
5102 	if (!status)
5103 		status = ice_sched_replay_q_bw(pi, q_ctx);
5104 
5105 ena_txq_exit:
5106 	ice_release_lock(&pi->sched_lock);
5107 	return status;
5108 }
5109 
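/*
 * Sketch of the "generic" byte layout documented in ice_ena_vsi_txq() above.
 * The helper and its parameter names are illustrative, not a driver API:
 * bit 0 is the scheduling mode, bits 1-3 the priority among siblings,
 * bit 4 WFQ, bits 5-6 the PSM credit adjustment, and bit 7 is reserved.
 */
static inline u8 demo_pack_sched_generic(u8 mode, u8 prio, u8 wfq, u8 adjust)
{
	return (mode & 0x1) | ((prio & 0x7) << 1) | ((wfq & 0x1) << 4) |
	       ((adjust & 0x3) << 5);
}
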
5110 /**
5111  * ice_dis_vsi_txq
5112  * @pi: port information structure
5113  * @vsi_handle: software VSI handle
5114  * @tc: TC number
5115  * @num_queues: number of queues
5116  * @q_handles: pointer to software queue handle array
5117  * @q_ids: pointer to the q_id array
5118  * @q_teids: pointer to queue node teids
5119  * @rst_src: if called due to reset, specifies the reset source
5120  * @vmvf_num: the relative VM or VF number that is undergoing the reset
5121  * @cd: pointer to command details structure or NULL
5122  *
5123  * This function removes queues and their corresponding nodes in SW DB
5124  */
5125 enum ice_status
5126 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
5127 		u16 *q_handles, u16 *q_ids, u32 *q_teids,
5128 		enum ice_disq_rst_src rst_src, u16 vmvf_num,
5129 		struct ice_sq_cd *cd)
5130 {
5131 	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
5132 	struct ice_aqc_dis_txq_item *qg_list;
5133 	struct ice_q_ctx *q_ctx;
5134 	struct ice_hw *hw;
5135 	u16 i, buf_size;
5136 
5137 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5138 		return ICE_ERR_CFG;
5139 
5140 	hw = pi->hw;
5141 
5142 	if (!num_queues) {
5143 		/* if the queues are already disabled but the disable queue
5144 		 * command still has to be sent to complete a VF reset, then
5145 		 * call ice_aq_dis_lan_txq without any queue information
5146 		 */
5147 		if (rst_src)
5148 			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
5149 						  vmvf_num, NULL);
5150 		return ICE_ERR_CFG;
5151 	}
5152 
5153 	buf_size = ice_struct_size(qg_list, q_id, 1);
5154 	qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, buf_size);
5155 	if (!qg_list)
5156 		return ICE_ERR_NO_MEMORY;
5157 
5158 	ice_acquire_lock(&pi->sched_lock);
5159 
5160 	for (i = 0; i < num_queues; i++) {
5161 		struct ice_sched_node *node;
5162 
5163 		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
5164 		if (!node)
5165 			continue;
5166 		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
5167 		if (!q_ctx) {
5168 			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
5169 				  q_handles[i]);
5170 			continue;
5171 		}
5172 		if (q_ctx->q_handle != q_handles[i]) {
5173 			ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
5174 				  q_ctx->q_handle, q_handles[i]);
5175 			continue;
5176 		}
5177 		qg_list->parent_teid = node->info.parent_teid;
5178 		qg_list->num_qs = 1;
5179 		qg_list->q_id[0] = CPU_TO_LE16(q_ids[i]);
5180 		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
5181 					    vmvf_num, cd);
5182 
5183 		if (status != ICE_SUCCESS)
5184 			break;
5185 		ice_free_sched_node(pi, node);
5186 		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
5187 	}
5188 	ice_release_lock(&pi->sched_lock);
5189 	ice_free(hw, qg_list);
5190 	return status;
5191 }
5192 
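/*
 * Usage sketch (not a driver API): ice_dis_vsi_txq() takes parallel arrays;
 * disabling a single queue outside of a reset is just the arrays-of-one
 * case. All queue identifiers are caller-supplied.
 */
static inline enum ice_status
demo_dis_one_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		 u16 q_handle, u16 q_id, u32 q_teid)
{
	return ice_dis_vsi_txq(pi, vsi_handle, tc, 1, &q_handle, &q_id,
			       &q_teid, ICE_NO_RESET, 0, NULL);
}
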
5193 /**
5194  * ice_cfg_vsi_qs - configure the new/existing VSI queues
5195  * @pi: port information structure
5196  * @vsi_handle: software VSI handle
5197  * @tc_bitmap: TC bitmap
5198  * @maxqs: max queues array per TC
5199  * @owner: LAN or RDMA
5200  *
5201  * This function adds/updates the VSI queues per TC.
5202  */
5203 static enum ice_status
5204 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
5205 	       u16 *maxqs, u8 owner)
5206 {
5207 	enum ice_status status = ICE_SUCCESS;
5208 	u8 i;
5209 
5210 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5211 		return ICE_ERR_CFG;
5212 
5213 	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
5214 		return ICE_ERR_PARAM;
5215 
5216 	ice_acquire_lock(&pi->sched_lock);
5217 
5218 	ice_for_each_traffic_class(i) {
5219 		/* configuration is possible only if TC node is present */
5220 		if (!ice_sched_get_tc_node(pi, i))
5221 			continue;
5222 
5223 		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
5224 					   ice_is_tc_ena(tc_bitmap, i));
5225 		if (status)
5226 			break;
5227 	}
5228 
5229 	ice_release_lock(&pi->sched_lock);
5230 	return status;
5231 }
5232 
5233 /**
5234  * ice_cfg_vsi_lan - configure VSI LAN queues
5235  * @pi: port information structure
5236  * @vsi_handle: software VSI handle
5237  * @tc_bitmap: TC bitmap
5238  * @max_lanqs: max LAN queues array per TC
5239  *
5240  * This function adds/updates the VSI LAN queues per TC.
5241  */
5242 enum ice_status
5243 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
5244 		u16 *max_lanqs)
5245 {
5246 	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
5247 			      ICE_SCHED_NODE_OWNER_LAN);
5248 }
5249 
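/*
 * Usage sketch (not a driver API): ice_cfg_vsi_lan() expects one max-queue
 * count per TC. ICE_MAX_TRAFFIC_CLASS (from the ice headers) sizes the
 * array; the bitmap value below (TC 0 only) and the queue count are
 * illustrative.
 */
static inline enum ice_status
demo_cfg_single_tc_lan(struct ice_port_info *pi, u16 vsi_handle, u16 num_q)
{
	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };

	max_lanqs[0] = num_q;
	return ice_cfg_vsi_lan(pi, vsi_handle, 0x1, max_lanqs);
}
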
5250 /**
5251  * ice_cfg_vsi_rdma - configure the VSI RDMA queues
5252  * @pi: port information structure
5253  * @vsi_handle: software VSI handle
5254  * @tc_bitmap: TC bitmap
5255  * @max_rdmaqs: max RDMA queues array per TC
5256  *
5257  * This function adds/updates the VSI RDMA queues per TC.
5258  */
5259 enum ice_status
5260 ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
5261 		 u16 *max_rdmaqs)
5262 {
5263 	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
5264 			      ICE_SCHED_NODE_OWNER_RDMA);
5265 }
5266 
5267 /**
5268  * ice_ena_vsi_rdma_qset
5269  * @pi: port information structure
5270  * @vsi_handle: software VSI handle
5271  * @tc: TC number
5272  * @rdma_qset: pointer to an array of RDMA qset IDs
5273  * @num_qsets: number of RDMA qsets
5274  * @qset_teid: pointer to qset node teids
5275  *
5276  * This function adds RDMA qsets
5277  */
5278 enum ice_status
5279 ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
5280 		      u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
5281 {
5282 	struct ice_aqc_txsched_elem_data node = { 0 };
5283 	struct ice_aqc_add_rdma_qset_data *buf;
5284 	struct ice_sched_node *parent;
5285 	enum ice_status status;
5286 	struct ice_hw *hw;
5287 	u16 i, buf_size;
5288 
5289 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5290 		return ICE_ERR_CFG;
5291 	hw = pi->hw;
5292 
5293 	if (!ice_is_vsi_valid(hw, vsi_handle))
5294 		return ICE_ERR_PARAM;
5295 
5296 	buf_size = ice_struct_size(buf, rdma_qsets, num_qsets);
5297 	buf = (struct ice_aqc_add_rdma_qset_data *)ice_malloc(hw, buf_size);
5298 	if (!buf)
5299 		return ICE_ERR_NO_MEMORY;
5300 	ice_acquire_lock(&pi->sched_lock);
5301 
5302 	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
5303 					    ICE_SCHED_NODE_OWNER_RDMA);
5304 	if (!parent) {
5305 		status = ICE_ERR_PARAM;
5306 		goto rdma_error_exit;
5307 	}
5308 	buf->parent_teid = parent->info.node_teid;
5309 	node.parent_teid = parent->info.node_teid;
5310 
5311 	buf->num_qsets = CPU_TO_LE16(num_qsets);
5312 	for (i = 0; i < num_qsets; i++) {
5313 		buf->rdma_qsets[i].tx_qset_id = CPU_TO_LE16(rdma_qset[i]);
5314 		buf->rdma_qsets[i].info.valid_sections =
5315 			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
5316 			ICE_AQC_ELEM_VALID_EIR;
5317 		buf->rdma_qsets[i].info.generic = 0;
5318 		buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
5319 			CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5320 		buf->rdma_qsets[i].info.cir_bw.bw_alloc =
5321 			CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5322 		buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
5323 			CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5324 		buf->rdma_qsets[i].info.eir_bw.bw_alloc =
5325 			CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5326 	}
5327 	status = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
5328 	if (status != ICE_SUCCESS) {
5329 		ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
5330 		goto rdma_error_exit;
5331 	}
5332 	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
5333 	for (i = 0; i < num_qsets; i++) {
5334 		node.node_teid = buf->rdma_qsets[i].qset_teid;
5335 		status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
5336 					    &node);
5337 		if (status)
5338 			break;
5339 		qset_teid[i] = LE32_TO_CPU(node.node_teid);
5340 	}
5341 rdma_error_exit:
5342 	ice_release_lock(&pi->sched_lock);
5343 	ice_free(hw, buf);
5344 	return status;
5345 }
5346 
5347 /**
5348  * ice_dis_vsi_rdma_qset - free RDMA resources
5349  * @pi: port_info struct
5350  * @count: number of RDMA qsets to free
5351  * @qset_teid: list of TEIDs of the qset nodes being freed
5352  * @q_id: list of queue IDs being disabled
5353  */
5354 enum ice_status
5355 ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
5356 		      u16 *q_id)
5357 {
5358 	struct ice_aqc_dis_txq_item *qg_list;
5359 	enum ice_status status = ICE_SUCCESS;
5360 	struct ice_hw *hw;
5361 	u16 qg_size;
5362 	int i;
5363 
5364 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5365 		return ICE_ERR_CFG;
5366 
5367 	hw = pi->hw;
5368 
5369 	qg_size = ice_struct_size(qg_list, q_id, 1);
5370 	qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, qg_size);
5371 	if (!qg_list)
5372 		return ICE_ERR_NO_MEMORY;
5373 
5374 	ice_acquire_lock(&pi->sched_lock);
5375 
5376 	for (i = 0; i < count; i++) {
5377 		struct ice_sched_node *node;
5378 
5379 		node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
5380 		if (!node)
5381 			continue;
5382 
5383 		qg_list->parent_teid = node->info.parent_teid;
5384 		qg_list->num_qs = 1;
5385 		qg_list->q_id[0] =
5386 			CPU_TO_LE16(q_id[i] |
5387 				    ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);
5388 
5389 		status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
5390 					    ICE_NO_RESET, 0, NULL);
5391 		if (status)
5392 			break;
5393 
5394 		ice_free_sched_node(pi, node);
5395 	}
5396 
5397 	ice_release_lock(&pi->sched_lock);
5398 	ice_free(hw, qg_list);
5399 	return status;
5400 }
5401 
5402 /**
5403  * ice_is_main_vsi - checks whether the VSI is main VSI
5404  * @hw: pointer to the HW struct
5405  * @vsi_handle: VSI handle
5406  *
5407  * Checks whether the VSI is the main VSI (the first PF VSI created on
5408  * given PF).
5409  */
5410 static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
5411 {
5412 	return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle];
5413 }
5414 
5415 /**
5416  * ice_replay_pre_init - replay pre initialization
5417  * @hw: pointer to the HW struct
5418  * @sw: pointer to switch info struct for which function initializes filters
5419  *
5420  * Initializes required config data for VSI, FD, ACL, and RSS before replay.
5421  */
5422 enum ice_status
5423 ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
5424 {
5425 	enum ice_status status;
5426 	u8 i;
5427 
5428 	/* Delete old entries from replay filter list head if there is any */
5429 	ice_rm_sw_replay_rule_info(hw, sw);
5430 	/* At the start of replay, move entries into the replay_rules list;
5431 	 * this allows rule entries to be added back to the filt_rules list,
5432 	 * which is the operational list.
5433 	 */
5434 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
5435 		LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
5436 				  &sw->recp_list[i].filt_replay_rules);
5437 	ice_sched_replay_agg_vsi_preinit(hw);
5438 
5439 	status = ice_sched_replay_root_node_bw(hw->port_info);
5440 	if (status)
5441 		return status;
5442 
5443 	return ice_sched_replay_tc_node_bw(hw->port_info);
5444 }
5445 
5446 /**
5447  * ice_replay_vsi - replay VSI configuration
5448  * @hw: pointer to the HW struct
5449  * @vsi_handle: driver VSI handle
5450  *
5451  * Restore all VSI configuration after reset. It is required to call this
5452  * function with main VSI first.
5453  */
5454 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
5455 {
5456 	struct ice_switch_info *sw = hw->switch_info;
5457 	struct ice_port_info *pi = hw->port_info;
5458 	enum ice_status status;
5459 
5460 	if (!ice_is_vsi_valid(hw, vsi_handle))
5461 		return ICE_ERR_PARAM;
5462 
5463 	/* Replay pre-initialization if there is any */
5464 	if (ice_is_main_vsi(hw, vsi_handle)) {
5465 		status = ice_replay_pre_init(hw, sw);
5466 		if (status)
5467 			return status;
5468 	}
5469 	/* Replay per VSI all RSS configurations */
5470 	status = ice_replay_rss_cfg(hw, vsi_handle);
5471 	if (status)
5472 		return status;
5473 	/* Replay per VSI all filters */
5474 	status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle);
5475 	if (!status)
5476 		status = ice_replay_vsi_agg(hw, vsi_handle);
5477 	return status;
5478 }
5479 
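/*
 * Usage sketch (not a driver API): honor the ordering requirement above by
 * replaying the main VSI before any other valid VSI handle. The upper bound
 * on handles is caller-supplied.
 */
static inline enum ice_status
demo_replay_all_vsi(struct ice_hw *hw, u16 num_handles)
{
	enum ice_status status;
	u16 i;

	/* main VSI first, as ice_replay_vsi() requires */
	status = ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);
	if (status)
		return status;

	for (i = 0; i < num_handles; i++) {
		if (i == ICE_MAIN_VSI_HANDLE || !ice_is_vsi_valid(hw, i))
			continue;
		status = ice_replay_vsi(hw, i);
		if (status)
			return status;
	}
	return ICE_SUCCESS;
}
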
5480 /**
5481  * ice_replay_post - post replay configuration cleanup
5482  * @hw: pointer to the HW struct
5483  *
5484  * Post replay cleanup.
5485  */
5486 void ice_replay_post(struct ice_hw *hw)
5487 {
5488 	/* Delete old entries from replay filter list head */
5489 	ice_rm_all_sw_replay_rule_info(hw);
5490 	ice_sched_replay_agg(hw);
5491 }
5492 
5493 /**
5494  * ice_stat_update40 - read 40 bit stat from the chip and update stat values
5495  * @hw: ptr to the hardware info
5496  * @reg: offset of 64 bit HW register to read from
5497  * @prev_stat_loaded: bool to specify if previous stats are loaded
5498  * @prev_stat: ptr to previous loaded stat value
5499  * @cur_stat: ptr to current stat value
5500  */
5501 void
5502 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
5503 		  u64 *prev_stat, u64 *cur_stat)
5504 {
5505 	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
5506 
5507 	/* device stats are not reset at PFR, so they likely will not be
5508 	 * zeroed when the driver starts. Thus, save the value from the first
5509 	 * read without adding it to the statistic value so that we report
5510 	 * stats which count up from zero.
5511 	 */
5512 	if (!prev_stat_loaded) {
5513 		*prev_stat = new_data;
5514 		return;
5515 	}
5516 
5517 	/* Calculate the difference between the new and old values, and then
5518 	 * add it to the software stat value.
5519 	 */
5520 	if (new_data >= *prev_stat)
5521 		*cur_stat += new_data - *prev_stat;
5522 	else
5523 		/* to manage the potential roll-over */
5524 		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
5525 
5526 	/* Update the previously stored value to prepare for next read */
5527 	*prev_stat = new_data;
5528 }
5529 
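/*
 * Worked form of the rollover handling above (illustrative helper, not a
 * driver API): a new reading smaller than the previous one means the 40-bit
 * counter wrapped, so add back 2^40 before taking the delta.
 */
static inline u64 demo_stat_delta40(u64 prev, u64 new_data)
{
	if (new_data >= prev)
		return new_data - prev;
	/* counter wrapped past 2^40 */
	return (new_data + BIT_ULL(40)) - prev;
}
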
5530 /**
5531  * ice_stat_update32 - read 32 bit stat from the chip and update stat values
5532  * @hw: ptr to the hardware info
5533  * @reg: offset of HW register to read from
5534  * @prev_stat_loaded: bool to specify if previous stats are loaded
5535  * @prev_stat: ptr to previous loaded stat value
5536  * @cur_stat: ptr to current stat value
5537  */
5538 void
5539 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
5540 		  u64 *prev_stat, u64 *cur_stat)
5541 {
5542 	u32 new_data;
5543 
5544 	new_data = rd32(hw, reg);
5545 
5546 	/* device stats are not reset at PFR, so they likely will not be
5547 	 * zeroed when the driver starts. Thus, save the value from the first
5548 	 * read without adding it to the statistic value so that we report
5549 	 * stats which count up from zero.
5550 	 */
5551 	if (!prev_stat_loaded) {
5552 		*prev_stat = new_data;
5553 		return;
5554 	}
5555 
5556 	/* Calculate the difference between the new and old values, and then
5557 	 * add it to the software stat value.
5558 	 */
5559 	if (new_data >= *prev_stat)
5560 		*cur_stat += new_data - *prev_stat;
5561 	else
5562 		/* to manage the potential roll-over */
5563 		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
5564 
5565 	/* Update the previously stored value to prepare for next read */
5566 	*prev_stat = new_data;
5567 }
5568 
5569 /**
5570  * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
5571  * @hw: ptr to the hardware info
5572  * @vsi_handle: VSI handle
5573  * @prev_stat_loaded: bool to specify if the previous stat values are loaded
5574  * @cur_stats: ptr to current stats structure
5575  *
5576  * The GLV_REPC statistic register actually tracks two 16-bit statistics, and
5577  * thus cannot be read using the normal ice_stat_update32 function.
5578  *
5579  * Read the GLV_REPC register associated with the given VSI, and update the
5580  * rx_no_desc and rx_error values in the ice_eth_stats structure.
5581  *
5582  * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
5583  * cleared each time it's read.
5584  *
5585  * Note that the GLV_RDPC register also counts the causes that would trigger
5586  * GLV_REPC. However, it does not give the finer grained detail about why the
5587  * packets are being dropped. The GLV_REPC values can be used to distinguish
5588  * whether Rx packets are dropped due to errors or due to no available
5589  * descriptors.
5590  */
5591 void
5592 ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
5593 		     struct ice_eth_stats *cur_stats)
5594 {
5595 	u16 vsi_num, no_desc, error_cnt;
5596 	u32 repc;
5597 
5598 	if (!ice_is_vsi_valid(hw, vsi_handle))
5599 		return;
5600 
5601 	vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
5602 
5603 	/* If we haven't loaded stats yet, just clear the current value */
5604 	if (!prev_stat_loaded) {
5605 		wr32(hw, GLV_REPC(vsi_num), 0);
5606 		return;
5607 	}
5608 
5609 	repc = rd32(hw, GLV_REPC(vsi_num));
5610 	no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
5611 	error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;
5612 
5613 	/* Clear the count by writing to the stats register */
5614 	wr32(hw, GLV_REPC(vsi_num), 0);
5615 
5616 	cur_stats->rx_no_desc += no_desc;
5617 	cur_stats->rx_errors += error_cnt;
5618 }
5619 
5620 /**
5621  * ice_aq_alternate_write
5622  * @hw: pointer to the hardware structure
5623  * @reg_addr0: address of first dword to be written
5624  * @reg_val0: value to be written under 'reg_addr0'
5625  * @reg_addr1: address of second dword to be written
5626  * @reg_val1: value to be written under 'reg_addr1'
5627  *
5628  * Write one or two dwords to alternate structure. Fields are indicated
5629  * by 'reg_addr0' and 'reg_addr1' register numbers.
5630  */
5631 enum ice_status
5632 ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
5633 		       u32 reg_addr1, u32 reg_val1)
5634 {
5635 	struct ice_aqc_read_write_alt_direct *cmd;
5636 	struct ice_aq_desc desc;
5637 	enum ice_status status;
5638 
5639 	cmd = &desc.params.read_write_alt_direct;
5640 
5641 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_alt_direct);
5642 	cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
5643 	cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
5644 	cmd->dword0_value = CPU_TO_LE32(reg_val0);
5645 	cmd->dword1_value = CPU_TO_LE32(reg_val1);
5646 
5647 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5648 
5649 	return status;
5650 }
5651 
5652 /**
5653  * ice_aq_alternate_read
5654  * @hw: pointer to the hardware structure
5655  * @reg_addr0: address of first dword to be read
5656  * @reg_val0: pointer for data read from 'reg_addr0'
5657  * @reg_addr1: address of second dword to be read
5658  * @reg_val1: pointer for data read from 'reg_addr1'
5659  *
5660  * Read one or two dwords from alternate structure. Fields are indicated
5661  * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
5662  * is not passed then only register at 'reg_addr0' is read.
5663  */
5664 enum ice_status
5665 ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
5666 		      u32 reg_addr1, u32 *reg_val1)
5667 {
5668 	struct ice_aqc_read_write_alt_direct *cmd;
5669 	struct ice_aq_desc desc;
5670 	enum ice_status status;
5671 
5672 	cmd = &desc.params.read_write_alt_direct;
5673 
5674 	if (!reg_val0)
5675 		return ICE_ERR_PARAM;
5676 
5677 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_alt_direct);
5678 	cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
5679 	cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
5680 
5681 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5682 
5683 	if (status == ICE_SUCCESS) {
5684 		*reg_val0 = LE32_TO_CPU(cmd->dword0_value);
5685 
5686 		if (reg_val1)
5687 			*reg_val1 = LE32_TO_CPU(cmd->dword1_value);
5688 	}
5689 
5690 	return status;
5691 }
5692 
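/*
 * Usage sketch: per the note above, passing a NULL second pointer reads only
 * the first dword. The register address is caller-supplied; the helper name
 * is illustrative.
 */
static inline enum ice_status
demo_read_one_alt_dword(struct ice_hw *hw, u32 reg_addr, u32 *reg_val)
{
	return ice_aq_alternate_read(hw, reg_addr, reg_val, 0, NULL);
}
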
5693 /**
5694  *  ice_aq_alternate_write_done
5695  *  @hw: pointer to the HW structure.
5696  *  @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
5697  *  @reset_needed: on return, indicates whether SW must trigger a GLOBAL reset
5698  *
5699  *  Indicates to the FW that alternate structures have been changed.
5700  */
5701 enum ice_status
5702 ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode, bool *reset_needed)
5703 {
5704 	struct ice_aqc_done_alt_write *cmd;
5705 	struct ice_aq_desc desc;
5706 	enum ice_status status;
5707 
5708 	cmd = &desc.params.done_alt_write;
5709 
5710 	if (!reset_needed)
5711 		return ICE_ERR_PARAM;
5712 
5713 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_done_alt_write);
5714 	cmd->flags = bios_mode;
5715 
5716 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5717 	if (!status)
5718 		*reset_needed = (LE16_TO_CPU(cmd->flags) &
5719 				 ICE_AQC_RESP_RESET_NEEDED) != 0;
5720 
5721 	return status;
5722 }
5723 
5724 /**
5725  *  ice_aq_alternate_clear
5726  *  @hw: pointer to the HW structure.
5727  *
5728  *  Clear the alternate structures of the port from which the function
5729  *  is called.
5730  */
5731 enum ice_status ice_aq_alternate_clear(struct ice_hw *hw)
5732 {
5733 	struct ice_aq_desc desc;
5734 	enum ice_status status;
5735 
5736 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_port_alt_write);
5737 
5738 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5739 
5740 	return status;
5741 }
5742 
5743 /**
5744  * ice_sched_query_elem - query element information from HW
5745  * @hw: pointer to the HW struct
5746  * @node_teid: node TEID to be queried
5747  * @buf: buffer to element information
5748  *
5749  * This function queries HW element information
5750  */
5751 enum ice_status
5752 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
5753 		     struct ice_aqc_txsched_elem_data *buf)
5754 {
5755 	u16 buf_size, num_elem_ret = 0;
5756 	enum ice_status status;
5757 
5758 	buf_size = sizeof(*buf);
5759 	ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
5760 	buf->node_teid = CPU_TO_LE32(node_teid);
5761 	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
5762 					  NULL);
5763 	if (status != ICE_SUCCESS || num_elem_ret != 1)
5764 		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
5765 	return status;
5766 }
5767 
5768 /**
5769  * ice_get_fw_mode - returns FW mode
5770  * @hw: pointer to the HW struct
5771  */
5772 enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
5773 {
5774 #define ICE_FW_MODE_DBG_M BIT(0)
5775 #define ICE_FW_MODE_REC_M BIT(1)
5776 #define ICE_FW_MODE_ROLLBACK_M BIT(2)
5777 	u32 fw_mode;
5778 
5779 	/* check the current FW mode */
5780 	fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;
5781 	if (fw_mode & ICE_FW_MODE_DBG_M)
5782 		return ICE_FW_MODE_DBG;
5783 	else if (fw_mode & ICE_FW_MODE_REC_M)
5784 		return ICE_FW_MODE_REC;
5785 	else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
5786 		return ICE_FW_MODE_ROLLBACK;
5787 	else
5788 		return ICE_FW_MODE_NORMAL;
5789 }
5790 
5791 /**
5792  * ice_get_cur_lldp_persist_status
5793  * @hw: pointer to the HW struct
5794  * @lldp_status: return value of the LLDP persistent status
5795  *
5796  * Get the current status of LLDP persistence
5797  */
5798 enum ice_status
5799 ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
5800 {
5801 	struct ice_port_info *pi = hw->port_info;
5802 	enum ice_status ret;
5803 	__le32 raw_data;
5804 	u32 data, mask;
5805 
5806 	if (!lldp_status)
5807 		return ICE_ERR_BAD_PTR;
5808 
5809 	ret = ice_acquire_nvm(hw, ICE_RES_READ);
5810 	if (ret)
5811 		return ret;
5812 
5813 	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LLDP_PRESERVED_MOD_ID,
5814 			      ICE_AQC_NVM_CUR_LLDP_PERSIST_RD_OFFSET,
5815 			      ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data,
5816 			      false, true, NULL);
5817 	if (!ret) {
5818 		data = LE32_TO_CPU(raw_data);
5819 		mask = ICE_AQC_NVM_LLDP_STATUS_M <<
5820 			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
5821 		data = data & mask;
5822 		*lldp_status = data >>
5823 			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
5824 	}
5825 
5826 	ice_release_nvm(hw);
5827 
5828 	return ret;
5829 }
5830 
5831 /**
5832  * ice_get_dflt_lldp_persist_status
5833  * @hw: pointer to the HW struct
5834  * @lldp_status: return value of LLDP persistent status
5835  *
5836  * Get the default status of LLDP persistence
5837  */
5838 enum ice_status
5839 ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
5840 {
5841 	struct ice_port_info *pi = hw->port_info;
5842 	u32 data, mask, loc_data, loc_data_tmp;
5843 	enum ice_status ret;
5844 	__le16 loc_raw_data;
5845 	__le32 raw_data;
5846 
5847 	if (!lldp_status)
5848 		return ICE_ERR_BAD_PTR;
5849 
5850 	ret = ice_acquire_nvm(hw, ICE_RES_READ);
5851 	if (ret)
5852 		return ret;
5853 
5854 	/* Read the offset of EMP_SR_PTR */
5855 	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
5856 			      ICE_AQC_NVM_EMP_SR_PTR_OFFSET,
5857 			      ICE_AQC_NVM_EMP_SR_PTR_RD_LEN,
5858 			      &loc_raw_data, false, true, NULL);
5859 	if (ret)
5860 		goto exit;
5861 
5862 	loc_data = LE16_TO_CPU(loc_raw_data);
5863 	if (loc_data & ICE_AQC_NVM_EMP_SR_PTR_TYPE_M) {
5864 		loc_data &= ICE_AQC_NVM_EMP_SR_PTR_M;
5865 		loc_data *= ICE_AQC_NVM_SECTOR_UNIT;
5866 	} else {
5867 		loc_data *= ICE_AQC_NVM_WORD_UNIT;
5868 	}
5869 
5870 	/* Read the offset of LLDP configuration pointer */
5871 	loc_data += ICE_AQC_NVM_LLDP_CFG_PTR_OFFSET;
5872 	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
5873 			      ICE_AQC_NVM_LLDP_CFG_PTR_RD_LEN, &loc_raw_data,
5874 			      false, true, NULL);
5875 	if (ret)
5876 		goto exit;
5877 
5878 	loc_data_tmp = LE16_TO_CPU(loc_raw_data);
5879 	loc_data_tmp *= ICE_AQC_NVM_WORD_UNIT;
5880 	loc_data += loc_data_tmp;
5881 
5882 	/* We need to skip LLDP configuration section length (2 bytes) */
5883 	loc_data += ICE_AQC_NVM_LLDP_CFG_HEADER_LEN;
5884 
5885 	/* Read the LLDP Default Configure */
5886 	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
5887 			      ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data, false,
5888 			      true, NULL);
5889 	if (!ret) {
5890 		data = LE32_TO_CPU(raw_data);
5891 		mask = ICE_AQC_NVM_LLDP_STATUS_M <<
5892 			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
5893 		data = data & mask;
5894 		*lldp_status = data >>
5895 			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
5896 	}
5897 
5898 exit:
5899 	ice_release_nvm(hw);
5900 
5901 	return ret;
5902 }
5903 
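/*
 * Sketch of the pointer-resolution step above (illustrative helper, not a
 * driver API): an EMP SR pointer with its type bit set is expressed in
 * sectors, otherwise in words, before it can be used as a byte offset.
 */
static inline u32 demo_resolve_emp_sr_ptr(u16 raw_ptr)
{
	if (raw_ptr & ICE_AQC_NVM_EMP_SR_PTR_TYPE_M)
		return (raw_ptr & ICE_AQC_NVM_EMP_SR_PTR_M) *
			ICE_AQC_NVM_SECTOR_UNIT;
	return (u32)raw_ptr * ICE_AQC_NVM_WORD_UNIT;
}
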
5904 /**
5905  * ice_aq_read_i2c
5906  * @hw: pointer to the hw struct
5907  * @topo_addr: topology address for a device to communicate with
5908  * @bus_addr: 7-bit I2C bus address
5909  * @addr: I2C memory address (I2C offset) with up to 16 bits
5910  * @params: I2C parameters: bit [7] - Repeated start, bits [6:5] data offset size,
5911  *			    bit [4] - I2C address type, bits [3:0] - data size to read (0-16 bytes)
5912  * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
5913  * @cd: pointer to command details structure or NULL
5914  *
5915  * Read I2C (0x06E2)
5916  */
5917 enum ice_status
5918 ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
5919 		u16 bus_addr, __le16 addr, u8 params, u8 *data,
5920 		struct ice_sq_cd *cd)
5921 {
5922 	struct ice_aq_desc desc = { 0 };
5923 	struct ice_aqc_i2c *cmd;
5924 	enum ice_status status;
5925 	u8 data_size;
5926 
5927 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
5928 	cmd = &desc.params.read_write_i2c;
5929 
5930 	if (!data)
5931 		return ICE_ERR_PARAM;
5932 
5933 	data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;
5934 
5935 	cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
5936 	cmd->topo_addr = topo_addr;
5937 	cmd->i2c_params = params;
5938 	cmd->i2c_addr = addr;
5939 
5940 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5941 	if (!status) {
5942 		struct ice_aqc_read_i2c_resp *resp;
5943 		u8 i;
5944 
5945 		resp = &desc.params.read_i2c_resp;
5946 		for (i = 0; i < data_size; i++) {
5947 			*data = resp->i2c_data[i];
5948 			data++;
5949 		}
5950 	}
5951 
5952 	return status;
5953 }
5954 
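/*
 * Sketch of the 'params' byte encoding documented above (helper name and
 * argument split are illustrative): bits [3:0] carry the data size, bit [4]
 * the address type, bits [6:5] the data offset size, and bit [7] the
 * repeated-start flag.
 */
static inline u8 demo_i2c_params(u8 data_size, u8 addr_type, u8 offset_size,
				 u8 repeated_start)
{
	return (data_size & 0xf) | ((addr_type & 0x1) << 4) |
	       ((offset_size & 0x3) << 5) | ((repeated_start & 0x1) << 7);
}
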
5955 /**
5956  * ice_aq_write_i2c
5957  * @hw: pointer to the hw struct
5958  * @topo_addr: topology address for a device to communicate with
5959  * @bus_addr: 7-bit I2C bus address
5960  * @addr: I2C memory address (I2C offset) with up to 16 bits
5961  * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-4 bytes)
5962  * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
5963  * @cd: pointer to command details structure or NULL
5964  *
5965  * Write I2C (0x06E3)
5966  */
5967 enum ice_status
5968 ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
5969 		 u16 bus_addr, __le16 addr, u8 params, u8 *data,
5970 		 struct ice_sq_cd *cd)
5971 {
5972 	struct ice_aq_desc desc = { 0 };
5973 	struct ice_aqc_i2c *cmd;
5974 	u8 i, data_size;
5975 
5976 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
5977 	cmd = &desc.params.read_write_i2c;
5978 
5979 	data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;
5980 
5981 	/* data_size limited to 4 */
5982 	if (data_size > 4)
5983 		return ICE_ERR_PARAM;
5984 
5985 	cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
5986 	cmd->topo_addr = topo_addr;
5987 	cmd->i2c_params = params;
5988 	cmd->i2c_addr = addr;
5989 
5990 	for (i = 0; i < data_size; i++) {
5991 		cmd->i2c_data[i] = *data;
5992 		data++;
5993 	}
5994 
5995 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5996 }
5997 
5998 /**
5999  * ice_aq_set_gpio
6000  * @hw: pointer to the hw struct
6001  * @gpio_ctrl_handle: GPIO controller node handle
6002  * @pin_idx: IO Number of the GPIO that needs to be set
6003  * @value: SW-provided IO value to set in the LSB
6004  * @cd: pointer to command details structure or NULL
6005  *
6006  * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
6007  */
6008 enum ice_status
6009 ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
6010 		struct ice_sq_cd *cd)
6011 {
6012 	struct ice_aqc_gpio *cmd;
6013 	struct ice_aq_desc desc;
6014 
6015 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
6016 	cmd = &desc.params.read_write_gpio;
6017 	cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
6018 	cmd->gpio_num = pin_idx;
6019 	cmd->gpio_val = value ? 1 : 0;
6020 
6021 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6022 }
6023 
6024 /**
6025  * ice_aq_get_gpio
6026  * @hw: pointer to the hw struct
6027  * @gpio_ctrl_handle: GPIO controller node handle
6028  * @pin_idx: IO Number of the GPIO that needs to be read
6029  * @value: IO value read
6030  * @cd: pointer to command details structure or NULL
6031  *
6032  * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
6033  * the topology
6034  */
6035 enum ice_status
6036 ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
6037 		bool *value, struct ice_sq_cd *cd)
6038 {
6039 	struct ice_aqc_gpio *cmd;
6040 	struct ice_aq_desc desc;
6041 	enum ice_status status;
6042 
6043 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
6044 	cmd = &desc.params.read_write_gpio;
6045 	cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
6046 	cmd->gpio_num = pin_idx;
6047 
6048 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6049 	if (status)
6050 		return status;
6051 
6052 	*value = !!cmd->gpio_val;
6053 	return ICE_SUCCESS;
6054 }
6055 
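/*
 * Usage sketch combining the two wrappers above (helper name is
 * illustrative): read a pin, then write back its inverse. The controller
 * handle and pin index are caller-supplied.
 */
static inline enum ice_status
demo_toggle_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx)
{
	enum ice_status status;
	bool value;

	status = ice_aq_get_gpio(hw, gpio_ctrl_handle, pin_idx, &value, NULL);
	if (status)
		return status;
	return ice_aq_set_gpio(hw, gpio_ctrl_handle, pin_idx, !value, NULL);
}
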
6056 /**
6057  * ice_is_fw_api_min_ver
6058  * @hw: pointer to the hardware structure
6059  * @maj: major version
6060  * @min: minor version
6061  * @patch: patch version
6062  *
6063  * Checks if the firmware API is at least the given version
6064  */
6065 static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
6066 {
6067 	if (hw->api_maj_ver == maj) {
6068 		if (hw->api_min_ver > min)
6069 			return true;
6070 		if (hw->api_min_ver == min && hw->api_patch >= patch)
6071 			return true;
6072 	} else if (hw->api_maj_ver > maj) {
6073 		return true;
6074 	}
6075 
6076 	return false;
6077 }
6078 
6079 /**
6080  * ice_is_fw_min_ver
6081  * @hw: pointer to the hardware structure
6082  * @branch: branch version
6083  * @maj: major version
6084  * @min: minor version
6085  * @patch: patch version
6086  *
6087  * Checks if the firmware is at least the given version
6088  */
6089 static bool ice_is_fw_min_ver(struct ice_hw *hw, u8 branch, u8 maj, u8 min,
6090 			      u8 patch)
6091 {
6092 	if (hw->fw_branch == branch) {
6093 		if (hw->fw_maj_ver > maj)
6094 			return true;
6095 		if (hw->fw_maj_ver == maj) {
6096 			if (hw->fw_min_ver > min)
6097 				return true;
6098 			if (hw->fw_min_ver == min && hw->fw_patch >= patch)
6099 				return true;
6100 		}
6101 	} else if (hw->fw_branch > branch) {
6102 		return true;
6103 	}
6104 
6105 	return false;
6106 }
6107 
6108 /**
6109  * ice_fw_supports_link_override
6110  * @hw: pointer to the hardware structure
6111  *
6112  * Checks if the firmware supports link override
6113  */
6114 bool ice_fw_supports_link_override(struct ice_hw *hw)
6115 {
6116 	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
6117 				     ICE_FW_API_LINK_OVERRIDE_MIN,
6118 				     ICE_FW_API_LINK_OVERRIDE_PATCH);
6119 }
6120 
6121 /**
6122  * ice_get_link_default_override
6123  * @ldo: pointer to the link default override struct
6124  * @pi: pointer to the port info struct
6125  *
6126  * Gets the link default override for a port
6127  */
6128 enum ice_status
6129 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
6130 			      struct ice_port_info *pi)
6131 {
6132 	u16 i, tlv, tlv_len, tlv_start, buf, offset;
6133 	struct ice_hw *hw = pi->hw;
6134 	enum ice_status status;
6135 
6136 	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
6137 					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
6138 	if (status) {
6139 		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
6140 		return status;
6141 	}
6142 
6143 	/* Each port has its own config; calculate for our port */
6144 	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
6145 		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
6146 
6147 	/* link options first */
6148 	status = ice_read_sr_word(hw, tlv_start, &buf);
6149 	if (status) {
6150 		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
6151 		return status;
6152 	}
6153 	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
6154 	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
6155 		ICE_LINK_OVERRIDE_PHY_CFG_S;
6156 
6157 	/* link PHY config */
6158 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
6159 	status = ice_read_sr_word(hw, offset, &buf);
6160 	if (status) {
6161 		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
6162 		return status;
6163 	}
6164 	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
6165 
6166 	/* PHY types low */
6167 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
6168 	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
6169 		status = ice_read_sr_word(hw, (offset + i), &buf);
6170 		if (status) {
6171 			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_low.\n");
6172 			return status;
6173 		}
6174 		/* shift 16 bits at a time to fill 64 bits */
6175 		ldo->phy_type_low |= ((u64)buf << (i * 16));
6176 	}
6177 
6178 	/* PHY types high */
6179 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
6180 		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
6181 	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
6182 		status = ice_read_sr_word(hw, (offset + i), &buf);
6183 		if (status) {
6184 			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_high.\n");
6185 			return status;
6186 		}
6187 		/* shift 16 bits at a time to fill 64 bits */
6188 		ldo->phy_type_high |= ((u64)buf << (i * 16));
6189 	}
6190 
6191 	return status;
6192 }
6193 
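/*
 * Sketch of the word-assembly loops above (illustrative helper, not a
 * driver API): NVM words are packed least-significant-word first into a
 * 64-bit value, 16 bits at a time.
 */
static inline u64 demo_words_to_u64(const u16 *words, u8 count)
{
	u64 val = 0;
	u8 i;

	/* cap at four words so the shift never reaches 64 bits */
	for (i = 0; i < count && i < 4; i++)
		val |= (u64)words[i] << (i * 16);
	return val;
}
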
6194 /**
6195  * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
6196  * @caps: get PHY capability data
6197  */
6198 bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
6199 {
6200 	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
6201 	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
6202 				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
6203 				       ICE_AQC_PHY_AN_EN_CLAUSE37))
6204 		return true;
6205 
6206 	return false;
6207 }
6208 
6209 /**
6210  * ice_is_fw_health_report_supported
6211  * @hw: pointer to the hardware structure
6212  *
6213  * Return true if firmware supports health status reports,
6214  * false otherwise
6215  */
6216 bool ice_is_fw_health_report_supported(struct ice_hw *hw)
6217 {
6218 	if (hw->api_maj_ver > ICE_FW_API_HEALTH_REPORT_MAJ)
6219 		return true;
6220 
6221 	if (hw->api_maj_ver == ICE_FW_API_HEALTH_REPORT_MAJ) {
6222 		if (hw->api_min_ver > ICE_FW_API_HEALTH_REPORT_MIN)
6223 			return true;
6224 		if (hw->api_min_ver == ICE_FW_API_HEALTH_REPORT_MIN &&
6225 		    hw->api_patch >= ICE_FW_API_HEALTH_REPORT_PATCH)
6226 			return true;
6227 	}
6228 
6229 	return false;
6230 }
6231 
6232 /**
6233  * ice_aq_set_health_status_config - Configure FW health events
6234  * @hw: pointer to the HW struct
6235  * @event_source: type of diagnostic events to enable
6236  * @cd: pointer to command details structure or NULL
6237  *
6238  * Configure the health status event types that the firmware will send to this
6239  * PF. The supported event types are: PF-specific, all PFs, and global
6240  */
6241 enum ice_status
6242 ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source,
6243 				struct ice_sq_cd *cd)
6244 {
6245 	struct ice_aqc_set_health_status_config *cmd;
6246 	struct ice_aq_desc desc;
6247 
6248 	cmd = &desc.params.set_health_status_config;
6249 
6250 	ice_fill_dflt_direct_cmd_desc(&desc,
6251 				      ice_aqc_opc_set_health_status_config);
6252 
6253 	cmd->event_source = event_source;
6254 
6255 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6256 }
6257 
6258 /**
6259  * ice_aq_get_port_options
6260  * @hw: pointer to the hw struct
6261  * @options: buffer for the resultant port options
6262  * @option_count: input - size of the buffer in port options structures,
6263  *                output - number of returned port options
6264  * @lport: logical port to call the command with (optional)
6265  * @lport_valid: when false, the FW uses the port owned by the PF instead of
6266  *               lport; must be true when the PF owns more than one port
6267  * @active_option_idx: index of active port option in returned buffer
6268  * @active_option_valid: active option in returned buffer is valid
6269  *
6270  * Calls Get Port Options AQC (0x06ea) and verifies result.
6271  */
6272 enum ice_status
6273 ice_aq_get_port_options(struct ice_hw *hw,
6274 			struct ice_aqc_get_port_options_elem *options,
6275 			u8 *option_count, u8 lport, bool lport_valid,
6276 			u8 *active_option_idx, bool *active_option_valid)
6277 {
6278 	struct ice_aqc_get_port_options *cmd;
6279 	struct ice_aq_desc desc;
6280 	enum ice_status status;
6281 	u8 pmd_count;
6282 	u8 max_speed;
6283 	u8 i;
6284 
6285 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
6286 
6287 	/* options buffer shall be able to hold max returned options */
6288 	if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
6289 		return ICE_ERR_PARAM;
6290 
6291 	cmd = &desc.params.get_port_options;
6292 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);
6293 
6294 	if (lport_valid)
6295 		cmd->lport_num = lport;
6296 	cmd->lport_num_valid = lport_valid;
6297 
6298 	status = ice_aq_send_cmd(hw, &desc, options,
6299 				 *option_count * sizeof(*options), NULL);
6300 	if (status != ICE_SUCCESS)
6301 		return status;
6302 
6303 	/* verify direct FW response & set output parameters */
6304 	*option_count = cmd->port_options_count & ICE_AQC_PORT_OPT_COUNT_M;
6305 	ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
6306 	*active_option_valid = cmd->port_options & ICE_AQC_PORT_OPT_VALID;
6307 	if (*active_option_valid) {
6308 		*active_option_idx = cmd->port_options &
6309 				     ICE_AQC_PORT_OPT_ACTIVE_M;
6310 		if (*active_option_idx > (*option_count - 1))
6311 			return ICE_ERR_OUT_OF_RANGE;
6312 		ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
6313 			  *active_option_idx);
6314 	}
6315 
6316 	/* verify indirect FW response & mask output options fields */
6317 	for (i = 0; i < *option_count; i++) {
6318 		options[i].pmd &= ICE_AQC_PORT_OPT_PMD_COUNT_M;
6319 		options[i].max_lane_speed &= ICE_AQC_PORT_OPT_MAX_LANE_M;
6320 		pmd_count = options[i].pmd;
6321 		max_speed = options[i].max_lane_speed;
6322 		ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
6323 			  pmd_count, max_speed);
6324 
6325 		/* check only entries containing valid max pmd speed values,
6326 		 * other reserved values may be returned, when logical port
6327 		 * used is unrelated to specific option
6328 		 */
6329 		if (max_speed <= ICE_AQC_PORT_OPT_MAX_LANE_100G) {
6330 			if (pmd_count > ICE_MAX_PORT_PER_PCI_DEV)
6331 				return ICE_ERR_OUT_OF_RANGE;
6332 			if (pmd_count > 2 &&
6333 			    max_speed > ICE_AQC_PORT_OPT_MAX_LANE_25G)
6334 				return ICE_ERR_CFG;
6335 			if (pmd_count > 7 &&
6336 			    max_speed > ICE_AQC_PORT_OPT_MAX_LANE_10G)
6337 				return ICE_ERR_CFG;
6338 		}
6339 	}
6340 
6341 	return ICE_SUCCESS;
6342 }
6343 
6344 /**
6345  * ice_aq_set_lldp_mib - Set the LLDP MIB
6346  * @hw: pointer to the HW struct
6347  * @mib_type: Local, Remote or both Local and Remote MIBs
6348  * @buf: pointer to the caller-supplied buffer to store the MIB block
6349  * @buf_size: size of the buffer (in bytes)
6350  * @cd: pointer to command details structure or NULL
6351  *
6352  * Set the LLDP MIB. (0x0A08)
6353  */
6354 enum ice_status
6355 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
6356 		    struct ice_sq_cd *cd)
6357 {
6358 	struct ice_aqc_lldp_set_local_mib *cmd;
6359 	struct ice_aq_desc desc;
6360 
6361 	cmd = &desc.params.lldp_set_mib;
6362 
6363 	if (buf_size == 0 || !buf)
6364 		return ICE_ERR_PARAM;
6365 
6366 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
6367 
6368 	desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
6369 	desc.datalen = CPU_TO_LE16(buf_size);
6370 
6371 	cmd->type = mib_type;
6372 	cmd->length = CPU_TO_LE16(buf_size);
6373 
6374 	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
6375 }
6376 
6377 /**
6378  * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
6379  * @hw: pointer to HW struct
6380  */
6381 bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
6382 {
6383 	if (hw->mac_type != ICE_MAC_E810 && hw->mac_type != ICE_MAC_GENERIC)
6384 		return false;
6385 
6386 	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
6387 				     ICE_FW_API_LLDP_FLTR_MIN,
6388 				     ICE_FW_API_LLDP_FLTR_PATCH);
6389 }
6390 
6391 /**
6392  * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
6393  * @hw: pointer to HW struct
6394  * @vsi_num: absolute HW index for VSI
6395  * @add: boolean for if adding or removing a filter
6396  */
6397 enum ice_status
6398 ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
6399 {
6400 	struct ice_aqc_lldp_filter_ctrl *cmd;
6401 	struct ice_aq_desc desc;
6402 
6403 	cmd = &desc.params.lldp_filter_ctrl;
6404 
6405 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
6406 
6407 	if (add)
6408 		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
6409 	else
6410 		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
6411 
6412 	cmd->vsi_num = CPU_TO_LE16(vsi_num);
6413 
6414 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6415 }
6416 
6417 /**
6418  * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
6419  * @hw: pointer to HW struct
6420  */
6421 enum ice_status ice_lldp_execute_pending_mib(struct ice_hw *hw)
6422 {
6423 	struct ice_aq_desc desc;
6424 
6425 	ice_fill_dflt_direct_cmd_desc(&desc, ice_execute_pending_lldp_mib);
6426 
6427 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6428 }
6429 
6430 /**
6431  * ice_fw_supports_report_dflt_cfg
6432  * @hw: pointer to the hardware structure
6433  *
6434  * Checks if the firmware supports report default configuration
6435  */
6436 bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
6437 {
6438 	return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
6439 				     ICE_FW_API_REPORT_DFLT_CFG_MIN,
6440 				     ICE_FW_API_REPORT_DFLT_CFG_PATCH);
6441 }
6442 
6443 /**
6444  * ice_fw_supports_fec_dis_auto
6445  * @hw: pointer to the hardware structure
6446  *
6447  * Checks if the firmware supports FEC disable in Auto FEC mode
6448  */
6449 bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw)
6450 {
6451 	return ice_is_fw_min_ver(hw, ICE_FW_FEC_DIS_AUTO_BRANCH,
6452 				 ICE_FW_FEC_DIS_AUTO_MAJ,
6453 				 ICE_FW_FEC_DIS_AUTO_MIN,
6454 				 ICE_FW_FEC_DIS_AUTO_PATCH);
6455 }

6456 /**
6457  * ice_is_fw_auto_drop_supported
6458  * @hw: pointer to the hardware structure
6459  *
6460  * Checks if the firmware supports the auto drop feature
6461  */
6462 bool ice_is_fw_auto_drop_supported(struct ice_hw *hw)
6463 {
6464 	if (hw->api_maj_ver >= ICE_FW_API_AUTO_DROP_MAJ &&
6465 	    hw->api_min_ver >= ICE_FW_API_AUTO_DROP_MIN)
6466 		return true;
6467 	return false;
6468 }
6469 
6470