/* SPDX-License-Identifier: BSD-3-Clause */
/*  Copyright (c) 2020, Intel Corporation
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 *   3. Neither the name of the Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived from
 *      this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"

#include "ice_flow.h"
#include "ice_switch.h"

#define ICE_PF_RESET_WAIT_COUNT	300
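/* iteration bound for the reset polling loops below: ice_pf_reset() polls in
 * 1 ms steps and ice_check_reset() in 10 ms steps
 */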

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return ICE_SUCCESS;
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per-PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user-specified buffer, which should be interpreted as a
 * "manage_mac_read" response. The LAN MAC address found in the response is
 * also saved in the HW struct (port.mac).
 * ice_aq_discover_caps is expected to be called before this function.
 */
enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

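	/* on success, firmware echoes the address-valid flags back in the
	 * command descriptor
	 */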
	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ice_memcpy(hw->port_info->mac.lan_addr,
				   resp[i].mac_addr, ETH_ALEN,
				   ICE_DMA_TO_NONDMA);
			ice_memcpy(hw->port_info->mac.perm_addr,
				   resp[i].mac_addr,
				   ETH_ALEN, ICE_DMA_TO_NONDMA);
			break;
		}
	return ICE_SUCCESS;
}

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= CPU_TO_LE16(report_mode);
	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);

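	/* cache the PHY types reported with media (topology) caps so later
	 * link configuration can consult them
	 */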
	if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
		pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
		pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
	}

	return status;
}

/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * The cage node type can be used to determine whether a cage is present. If
 * the AQC returns an error (ENOENT), no cage is present, and the connection
 * type is therefore backplane or BASE-T.
 */
static enum ice_status
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
				   ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* The cage node type can be used to determine whether a cage is
	 * present. If the AQC returns an error (ENOENT), no cage is present,
	 * and the connection type is therefore backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			/* fall-through */
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			/* fall-through */
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x0607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = CPU_TO_LE16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);

	if (status != ICE_SUCCESS)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = LE16_TO_CPU(link_data.link_speed);
	li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
	li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

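	/* record whether firmware left link status event reporting enabled */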
	li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return ICE_SUCCESS;
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
enum ice_status
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	u16 fc_threshold_val, tx_timer_val;
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;
	u32 reg_val;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = CPU_TO_LE16(max_frame_size);

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on the transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority.
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	reg_val = rd32(hw,
		       PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = reg_val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);

	/* Retrieve the fc threshold */
	reg_val = rd32(hw,
		       PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_threshold_val = reg_val & MAKEMASK(0xFFFF, 0);
	cmd->fc_refresh_threshold = CPU_TO_LE16(fc_threshold_val);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;

	hw->switch_info = (struct ice_switch_info *)
			  ice_malloc(hw, sizeof(*hw->switch_info));

	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);

	return ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 ice_vsi_list_map_info, list_entry) {
		LIST_DEL(&v_pos_map->list_entry);
		ice_free(hw, v_pos_map);
	}
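	/* for each recipe, free its rule group entries and its (advanced or
	 * basic) filter rules along with the rule lock
	 */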
	recps = hw->switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
					 &recps[i].rg_list, ice_recp_grp_entry,
					 l_entry) {
			LIST_DEL(&rg_entry->l_entry);
			ice_free(hw, rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_adv_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr->lkups);
				ice_free(hw, lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr);
			}
		}
		if (recps[i].root_buf)
			ice_free(hw, recps[i].root_buf);
	}
	ice_rm_all_sw_replay_rule_info(hw);
	ice_free(hw, sw->recp_list);
	ice_free(hw, sw);
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

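	/* devices rated above 25G aggregate bandwidth use the coarser
	 * granularity
	 */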
	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_print_rollback_msg - print FW rollback message
 * @hw: pointer to the hardware structure
 */
void ice_print_rollback_msg(struct ice_hw *hw)
{
	char nvm_str[ICE_NVM_VER_LEN] = { 0 };
	struct ice_nvm_info *nvm = &hw->nvm;
	struct ice_orom_info *orom;

	orom = &nvm->orom;

	SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
		 nvm->major_ver, nvm->minor_ver, nvm->eetrack, orom->major,
		 orom->build, orom->patch);
	ice_warn(hw,
		 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
		 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
		PF_FUNC_RID_FUNCTION_NUMBER_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
		ice_print_rollback_msg(hw);

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_clear_pxe_mode(hw);

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = (struct ice_port_info *)
			ice_malloc(hw, sizeof(*hw->port_info));
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;
	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
	ice_free(hw, pcaps);
	if (status)
		goto err_unroll_sched;

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;
	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = ice_calloc(hw, 2,
			     sizeof(struct ice_aqc_manage_mac_read_resp));
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	ice_free(hw, mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	ice_init_lock(&hw->tnl_lock);
	return ICE_SUCCESS;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	ice_free(hw, hw->port_info);
	hw->port_info = NULL;
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	ice_destroy_lock(&hw->tnl_lock);

	if (hw->port_info) {
		ice_free(hw, hw->port_info);
		hw->port_info = NULL;
	}

	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_delay, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
		      GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_delay; cnt++) {
		ice_msec_delay(100, true);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_delay) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK;

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		ice_msec_delay(10, true);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return ICE_SUCCESS;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		ice_msec_delay(1, true);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

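	/* read-modify-write so an already-pending reset request isn't lost */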
	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

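	/* always enable descriptor prefetch rather than on-demand fetching */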
	rlan_ctx->prefena = 1;

	ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}

/**
 * ice_clear_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rxq_index: the index of the Rx queue to clear
 *
 * Clears rxq context in HW register space
 */
enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
{
	u8 i;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
		wr32(hw, QRX_CONTEXT(i, rxq_index), 0);

	return ICE_SUCCESS;
}

/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
				    /* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/**
 * ice_copy_tx_cmpltnq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Copies Tx completion queue context from dense structure to HW register space
 */
static enum ice_status
ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
			      u32 tx_cmpltnq_index)
{
	u8 i;

	if (!ice_tx_cmpltnq_ctx)
		return ICE_ERR_BAD_PTR;

	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
		     *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
			  *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Tx Completion Queue Context */
static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
				       /* Field			Width   LSB */
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len,		18,	64),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation,		1,	96),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr,		22,	97),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num,		3,	128),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num,		10,	131),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type,		2,	141),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr,		1,	160),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid,		8,	161),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache,		512,	192),
	{ 0 }
};

/**
 * ice_write_tx_cmpltnq_ctx
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_ctx: pointer to the completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Converts completion queue context from sparse to dense structure and then
 * writes it to HW register space
 */
enum ice_status
ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
			 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
			 u32 tx_cmpltnq_index)
{
	u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

	ice_set_ctx((u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
	return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
}

/**
 * ice_clear_tx_cmpltnq_ctx
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_index: the index of the completion queue to clear
 *
 * Clears Tx completion queue context in HW register space
 */
enum ice_status
ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
{
	u8 i;

	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);

	return ICE_SUCCESS;
}

/**
 * ice_copy_tx_drbell_q_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
 * @tx_drbell_q_index: the index of the doorbell queue
 *
 * Copies doorbell queue context from dense structure to HW register space
 */
static enum ice_status
ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
			       u32 tx_drbell_q_index)
{
	u8 i;

	if (!ice_tx_drbell_q_ctx)
		return ICE_ERR_BAD_PTR;

	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
		     *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
			  *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Tx Doorbell Queue Context info */
static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
					/* Field		Width   LSB */
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, base,		57,	0),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len,		13,	64),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num,		3,	80),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num,		8,	84),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type,		2,	94),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid,		8,	96),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd,		1,	104),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr,		1,	108),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en,		1,	112),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head,		13,	128),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail,		13,	144),
	{ 0 }
};

/**
 * ice_write_tx_drbell_q_ctx
 * @hw: pointer to the hardware structure
 * @tx_drbell_q_ctx: pointer to the doorbell queue context
 * @tx_drbell_q_index: the index of the doorbell queue
 *
 * Converts doorbell queue context from sparse to dense structure and then
 * writes it to HW register space
 */
enum ice_status
ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
			  struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
			  u32 tx_drbell_q_index)
{
	u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

	ice_set_ctx((u8 *)tx_drbell_q_ctx, ctx_buf, ice_tx_drbell_q_ctx_info);
	return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
}

/**
 * ice_clear_tx_drbell_q_ctx
 * @hw: pointer to the hardware structure
 * @tx_drbell_q_index: the index of the doorbell queue to clear
 *
 * Clears doorbell queue context in HW register space
 */
enum ice_status
ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
{
	u8 i;

	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);

	return ICE_SUCCESS;
}

/* FW Admin Queue command wrappers */

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
}

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = LE32_TO_CPU(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
enum ice_status
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

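	/* include only the leading ASCII characters of the driver string,
	 * stopping at the NUL terminator
	 */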
	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 *  1) ICE_SUCCESS -        acquired lock, and can perform download package
 *  2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
 *  3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
 *                          successfully downloaded the package; the driver does
 *                          not have to download the package and can continue
 *                          loading
 *
 * Note that if the caller is in an acquire-lock, perform-action, release-lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating that the Download
 * Package, Update Package, or Release Resource AQ command timed out.
1402  */
1403 static enum ice_status
1404 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1405 	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1406 	       struct ice_sq_cd *cd)
1407 {
1408 	struct ice_aqc_req_res *cmd_resp;
1409 	struct ice_aq_desc desc;
1410 	enum ice_status status;
1411 
1412 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1413 
1414 	cmd_resp = &desc.params.res_owner;
1415 
1416 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1417 
1418 	cmd_resp->res_id = CPU_TO_LE16(res);
1419 	cmd_resp->access_type = CPU_TO_LE16(access);
1420 	cmd_resp->res_number = CPU_TO_LE32(sdp_number);
1421 	cmd_resp->timeout = CPU_TO_LE32(*timeout);
1422 	*timeout = 0;
1423 
1424 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1425 
1426 	/* The completion specifies the maximum time in ms that the driver
1427 	 * may hold the resource in the Timeout field.
1428 	 */
1429 
1430 	/* Global config lock response utilizes an additional status field.
1431 	 *
1432 	 * If the Global config lock resource is held by some other driver, the
1433 	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1434 	 * and the timeout field indicates the maximum time the current owner
1435 	 * of the resource has to free it.
1436 	 */
1437 	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1438 		if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1439 			*timeout = LE32_TO_CPU(cmd_resp->timeout);
1440 			return ICE_SUCCESS;
1441 		} else if (LE16_TO_CPU(cmd_resp->status) ==
1442 			   ICE_AQ_RES_GLBL_IN_PROG) {
1443 			*timeout = LE32_TO_CPU(cmd_resp->timeout);
1444 			return ICE_ERR_AQ_ERROR;
1445 		} else if (LE16_TO_CPU(cmd_resp->status) ==
1446 			   ICE_AQ_RES_GLBL_DONE) {
1447 			return ICE_ERR_AQ_NO_WORK;
1448 		}
1449 
1450 		/* invalid FW response, force a timeout immediately */
1451 		*timeout = 0;
1452 		return ICE_ERR_AQ_ERROR;
1453 	}
1454 
1455 	/* If the resource is held by some other driver, the command completes
1456 	 * with a busy return value and the timeout field indicates the maximum
1457 	 * time the current owner of the resource has to free it.
1458 	 */
1459 	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1460 		*timeout = LE32_TO_CPU(cmd_resp->timeout);
1461 
1462 	return status;
1463 }
1464 
1465 /**
1466  * ice_aq_release_res
1467  * @hw: pointer to the HW struct
1468  * @res: resource ID
1469  * @sdp_number: resource number
1470  * @cd: pointer to command details structure or NULL
1471  *
1472  * release common resource using the admin queue commands (0x0009)
1473  */
1474 static enum ice_status
1475 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1476 		   struct ice_sq_cd *cd)
1477 {
1478 	struct ice_aqc_req_res *cmd;
1479 	struct ice_aq_desc desc;
1480 
1481 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1482 
1483 	cmd = &desc.params.res_owner;
1484 
1485 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1486 
1487 	cmd->res_id = CPU_TO_LE16(res);
1488 	cmd->res_number = CPU_TO_LE32(sdp_number);
1489 
1490 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1491 }
1492 
1493 /**
1494  * ice_acquire_res
1495  * @hw: pointer to the HW structure
1496  * @res: resource ID
1497  * @access: access type (read or write)
1498  * @timeout: timeout in milliseconds
1499  *
1500  * This function will attempt to acquire the ownership of a resource.
1501  */
1502 enum ice_status
1503 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1504 		enum ice_aq_res_access_type access, u32 timeout)
1505 {
1506 #define ICE_RES_POLLING_DELAY_MS	10
1507 	u32 delay = ICE_RES_POLLING_DELAY_MS;
1508 	u32 time_left = timeout;
1509 	enum ice_status status;
1510 
1511 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1512 
1513 	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1514 
1515 	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
1516 	 * previously acquired the resource and performed any necessary updates;
1517 	 * in this case the caller does not obtain the resource and has no
1518 	 * further work to do.
1519 	 */
1520 	if (status == ICE_ERR_AQ_NO_WORK)
1521 		goto ice_acquire_res_exit;
1522 
1523 	if (status)
1524 		ice_debug(hw, ICE_DBG_RES,
1525 			  "resource %d acquire type %d failed.\n", res, access);
1526 
	/* If necessary, poll until the current lock owner times out */
	timeout = time_left;
	while (status && timeout && time_left) {
		ice_msec_delay(delay, true);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}

/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin queue timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		ice_msec_delay(1, true);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}

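/* Illustrative sketch (not part of the driver flow): callers bracket accesses
 * to a shared resource with the acquire/release pair above; for example, the
 * NVM helpers in ice_nvm.c do roughly the following to read flash contents:
 *
 *	if (!ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ,
 *			     ICE_NVM_TIMEOUT)) {
 *		... perform the NVM reads ...
 *		ice_release_res(hw, ICE_NVM_RES_ID);
 *	}
 */
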
/**
 * ice_aq_alloc_free_res - command to allocate/free resources
 * @hw: pointer to the HW struct
 * @num_entries: number of resource entries in buffer
 * @buf: Indirect buffer to hold data parameters and response
 * @buf_size: size of buffer for indirect commands
 * @opc: pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Helper function to allocate/free resources using the admin queue commands
 */
enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_alloc_free_res_cmd *cmd;
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.sw_res_ctrl;

	if (!buf)
		return ICE_ERR_PARAM;

	if (buf_size < (num_entries * sizeof(buf->elem[0])))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	cmd->num_entries = CPU_TO_LE16(num_entries);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_alloc_hw_res - allocate resource
 * @hw: pointer to the HW struct
 * @type: type of resource
 * @num: number of resources to allocate
 * @btm: allocate from bottom
 * @res: pointer to array that will receive the resources
 */
enum ice_status
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

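	/* one element is already part of the struct, so size for num - 1 more */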
1637 	buf_len = ice_struct_size(buf, elem, num - 1);
1638 	buf = (struct ice_aqc_alloc_free_res_elem *)
1639 		ice_malloc(hw, buf_len);
1640 	if (!buf)
1641 		return ICE_ERR_NO_MEMORY;
1642 
1643 	/* Prepare buffer to allocate resource. */
1644 	buf->num_elems = CPU_TO_LE16(num);
1645 	buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1646 				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1647 	if (btm)
1648 		buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1649 
1650 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1651 				       ice_aqc_opc_alloc_res, NULL);
1652 	if (status)
1653 		goto ice_alloc_res_exit;
1654 
1655 	ice_memcpy(res, buf->elem, sizeof(buf->elem) * num,
1656 		   ICE_NONDMA_TO_NONDMA);
1657 
1658 ice_alloc_res_exit:
1659 	ice_free(hw, buf);
1660 	return status;
1661 }
1662 
1663 /**
1664  * ice_free_hw_res - free allocated HW resource
1665  * @hw: pointer to the HW struct
1666  * @type: type of resource to free
1667  * @num: number of resources
1668  * @res: pointer to array that contains the resources to free
1669  */
1670 enum ice_status
1671 ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1672 {
1673 	struct ice_aqc_alloc_free_res_elem *buf;
1674 	enum ice_status status;
1675 	u16 buf_len;
1676 
1677 	buf_len = ice_struct_size(buf, elem, num - 1);
1678 	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1679 	if (!buf)
1680 		return ICE_ERR_NO_MEMORY;
1681 
1682 	/* Prepare buffer to free resource. */
1683 	buf->num_elems = CPU_TO_LE16(num);
1684 	buf->res_type = CPU_TO_LE16(type);
1685 	ice_memcpy(buf->elem, res, sizeof(buf->elem) * num,
1686 		   ICE_NONDMA_TO_NONDMA);
1687 
1688 	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
1689 				       ice_aqc_opc_free_res, NULL);
1690 	if (status)
1691 		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1692 
1693 	ice_free(hw, buf);
1694 	return status;
1695 }
1696 
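/*
 * Illustrative usage sketch (not part of the driver): allocate two
 * dedicated entries of a resource type and free them again on
 * teardown. The resource type shown is one example; callers pass one
 * of the ICE_AQC_RES_TYPE_* values from ice_adminq_cmd.h.
 *
 *	u16 res_ids[2];
 *
 *	if (!ice_alloc_hw_res(hw, ICE_AQC_RES_TYPE_VSI_LIST_REP, 2, false,
 *			      res_ids)) {
 *		... use res_ids[0] and res_ids[1] ...
 *		ice_free_hw_res(hw, ICE_AQC_RES_TYPE_VSI_LIST_REP, 2,
 *				res_ids);
 *	}
 */
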
1697 /**
1698  * ice_get_num_per_func - determine number of resources per PF
1699  * @hw: pointer to the HW structure
1700  * @max: value to be evenly split between each PF
1701  *
1702  * Determine the number of valid functions by going through the bitmap returned
1703  * from parsing capabilities and use this to calculate the number of resources
1704  * per PF based on the max value passed in.
1705  */
1706 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1707 {
1708 	u8 funcs;
1709 
1710 #define ICE_CAPS_VALID_FUNCS_M	0xFF
1711 	funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
1712 			     ICE_CAPS_VALID_FUNCS_M);
1713 
1714 	if (!funcs)
1715 		return 0;
1716 
1717 	return max / funcs;
1718 }
1719 
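/*
 * Worked example: if valid_functions is 0x03 (two PFs enabled) and max
 * is ICE_MAX_VSI (768 on this device family), each PF is guaranteed
 * 768 / 2 = 384 VSIs; with all eight functions enabled the split would
 * be 768 / 8 = 96.
 */
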
1720 /**
1721  * ice_print_led_caps - print LED capabilities
1722  * @hw: pointer to the ice_hw instance
1723  * @caps: pointer to common caps instance
1724  * @prefix: string to prefix when printing
1725  * @debug: set to indicate debug print
1726  */
1727 static void
1728 ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1729 		   char const *prefix, bool debug)
1730 {
1731 	u8 i;
1732 
1733 	if (debug)
1734 		ice_debug(hw, ICE_DBG_INIT, "%s: led_pin_num = %d\n", prefix,
1735 			  caps->led_pin_num);
1736 	else
1737 		ice_info(hw, "%s: led_pin_num = %d\n", prefix,
1738 			 caps->led_pin_num);
1739 
1740 	for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_LED; i++) {
1741 		if (!caps->led[i])
1742 			continue;
1743 
1744 		if (debug)
1745 			ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = %d\n",
1746 				  prefix, i, caps->led[i]);
1747 		else
1748 			ice_info(hw, "%s: led[%d] = %d\n", prefix, i,
1749 				 caps->led[i]);
1750 	}
1751 }
1752 
1753 /**
1754  * ice_print_sdp_caps - print SDP capabilities
1755  * @hw: pointer to the ice_hw instance
1756  * @caps: pointer to common caps instance
1757  * @prefix: string to prefix when printing
1758  * @debug: set to indicate debug print
1759  */
1760 static void
1761 ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1762 		   char const *prefix, bool debug)
1763 {
1764 	u8 i;
1765 
1766 	if (debug)
1767 		ice_debug(hw, ICE_DBG_INIT, "%s: sdp_pin_num = %d\n", prefix,
1768 			  caps->sdp_pin_num);
1769 	else
1770 		ice_info(hw, "%s: sdp_pin_num = %d\n", prefix,
1771 			 caps->sdp_pin_num);
1772 
1773 	for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_SDP; i++) {
1774 		if (!caps->sdp[i])
1775 			continue;
1776 
1777 		if (debug)
1778 			ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = %d\n",
1779 				  prefix, i, caps->sdp[i]);
1780 		else
1781 			ice_info(hw, "%s: sdp[%d] = %d\n", prefix,
1782 				 i, caps->sdp[i]);
1783 	}
1784 }
1785 
1786 /**
1787  * ice_parse_caps - parse function/device capabilities
1788  * @hw: pointer to the HW struct
1789  * @buf: pointer to a buffer containing function/device capability records
1790  * @cap_count: number of capability records in the list
1791  * @opc: type of capabilities list to parse
1792  *
1793  * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
1794  */
1795 static void
1796 ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
1797 	       enum ice_adminq_opc opc)
1798 {
1799 	struct ice_aqc_list_caps_elem *cap_resp;
1800 	struct ice_hw_func_caps *func_p = NULL;
1801 	struct ice_hw_dev_caps *dev_p = NULL;
1802 	struct ice_hw_common_caps *caps;
1803 	char const *prefix;
1804 	u32 i;
1805 
1806 	if (!buf)
1807 		return;
1808 
1809 	cap_resp = (struct ice_aqc_list_caps_elem *)buf;
1810 
1811 	if (opc == ice_aqc_opc_list_dev_caps) {
1812 		dev_p = &hw->dev_caps;
1813 		caps = &dev_p->common_cap;
1814 		prefix = "dev cap";
1815 	} else if (opc == ice_aqc_opc_list_func_caps) {
1816 		func_p = &hw->func_caps;
1817 		caps = &func_p->common_cap;
1818 		prefix = "func cap";
1819 	} else {
1820 		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
1821 		return;
1822 	}
1823 
1824 	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
1825 		u32 logical_id = LE32_TO_CPU(cap_resp->logical_id);
1826 		u32 phys_id = LE32_TO_CPU(cap_resp->phys_id);
1827 		u32 number = LE32_TO_CPU(cap_resp->number);
1828 		u16 cap = LE16_TO_CPU(cap_resp->cap);
1829 
1830 		switch (cap) {
1831 		case ICE_AQC_CAPS_SWITCHING_MODE:
1832 			caps->switching_mode = number;
1833 			ice_debug(hw, ICE_DBG_INIT,
1834 				  "%s: switching_mode = %d\n", prefix,
1835 				  caps->switching_mode);
1836 			break;
1837 		case ICE_AQC_CAPS_MANAGEABILITY_MODE:
1838 			caps->mgmt_mode = number;
1839 			caps->mgmt_protocols_mctp = logical_id;
1840 			ice_debug(hw, ICE_DBG_INIT,
1841 				  "%s: mgmt_mode = %d\n", prefix,
1842 				  caps->mgmt_mode);
1843 			ice_debug(hw, ICE_DBG_INIT,
1844 				  "%s: mgmt_protocols_mctp = %d\n", prefix,
1845 				  caps->mgmt_protocols_mctp);
1846 			break;
1847 		case ICE_AQC_CAPS_OS2BMC:
1848 			caps->os2bmc = number;
1849 			ice_debug(hw, ICE_DBG_INIT,
1850 				  "%s: os2bmc = %d\n", prefix, caps->os2bmc);
1851 			break;
1852 		case ICE_AQC_CAPS_VALID_FUNCTIONS:
1853 			caps->valid_functions = number;
1854 			ice_debug(hw, ICE_DBG_INIT,
1855 				  "%s: valid_functions (bitmap) = %d\n", prefix,
1856 				  caps->valid_functions);
1857 
1858 			/* store func count for resource management purposes */
1859 			if (dev_p)
1860 				dev_p->num_funcs = ice_hweight32(number);
1861 			break;
1862 		case ICE_AQC_CAPS_SRIOV:
1863 			caps->sr_iov_1_1 = (number == 1);
1864 			ice_debug(hw, ICE_DBG_INIT,
1865 				  "%s: sr_iov_1_1 = %d\n", prefix,
1866 				  caps->sr_iov_1_1);
1867 			break;
1868 		case ICE_AQC_CAPS_VF:
1869 			if (dev_p) {
1870 				dev_p->num_vfs_exposed = number;
1871 				ice_debug(hw, ICE_DBG_INIT,
1872 					  "%s: num_vfs_exposed = %d\n", prefix,
1873 					  dev_p->num_vfs_exposed);
1874 			} else if (func_p) {
1875 				func_p->num_allocd_vfs = number;
1876 				func_p->vf_base_id = logical_id;
1877 				ice_debug(hw, ICE_DBG_INIT,
1878 					  "%s: num_allocd_vfs = %d\n", prefix,
1879 					  func_p->num_allocd_vfs);
1880 				ice_debug(hw, ICE_DBG_INIT,
1881 					  "%s: vf_base_id = %d\n", prefix,
1882 					  func_p->vf_base_id);
1883 			}
1884 			break;
1885 		case ICE_AQC_CAPS_802_1QBG:
1886 			caps->evb_802_1_qbg = (number == 1);
1887 			ice_debug(hw, ICE_DBG_INIT,
1888 				  "%s: evb_802_1_qbg = %d\n", prefix, number);
1889 			break;
1890 		case ICE_AQC_CAPS_802_1BR:
1891 			caps->evb_802_1_qbh = (number == 1);
1892 			ice_debug(hw, ICE_DBG_INIT,
1893 				  "%s: evb_802_1_qbh = %d\n", prefix, number);
1894 			break;
1895 		case ICE_AQC_CAPS_VSI:
1896 			if (dev_p) {
1897 				dev_p->num_vsi_allocd_to_host = number;
1898 				ice_debug(hw, ICE_DBG_INIT,
1899 					  "%s: num_vsi_allocd_to_host = %d\n",
1900 					  prefix,
1901 					  dev_p->num_vsi_allocd_to_host);
1902 			} else if (func_p) {
1903 				func_p->guar_num_vsi =
1904 					ice_get_num_per_func(hw, ICE_MAX_VSI);
1905 				ice_debug(hw, ICE_DBG_INIT,
1906 					  "%s: guar_num_vsi (fw) = %d\n",
1907 					  prefix, number);
1908 				ice_debug(hw, ICE_DBG_INIT,
1909 					  "%s: guar_num_vsi = %d\n",
1910 					  prefix, func_p->guar_num_vsi);
1911 			}
1912 			break;
1913 		case ICE_AQC_CAPS_DCB:
1914 			caps->dcb = (number == 1);
1915 			caps->active_tc_bitmap = logical_id;
1916 			caps->maxtc = phys_id;
1917 			ice_debug(hw, ICE_DBG_INIT,
1918 				  "%s: dcb = %d\n", prefix, caps->dcb);
1919 			ice_debug(hw, ICE_DBG_INIT,
1920 				  "%s: active_tc_bitmap = %d\n", prefix,
1921 				  caps->active_tc_bitmap);
1922 			ice_debug(hw, ICE_DBG_INIT,
1923 				  "%s: maxtc = %d\n", prefix, caps->maxtc);
1924 			break;
1925 		case ICE_AQC_CAPS_ISCSI:
1926 			caps->iscsi = (number == 1);
1927 			ice_debug(hw, ICE_DBG_INIT,
1928 				  "%s: iscsi = %d\n", prefix, caps->iscsi);
1929 			break;
1930 		case ICE_AQC_CAPS_RSS:
1931 			caps->rss_table_size = number;
1932 			caps->rss_table_entry_width = logical_id;
1933 			ice_debug(hw, ICE_DBG_INIT,
1934 				  "%s: rss_table_size = %d\n", prefix,
1935 				  caps->rss_table_size);
1936 			ice_debug(hw, ICE_DBG_INIT,
1937 				  "%s: rss_table_entry_width = %d\n", prefix,
1938 				  caps->rss_table_entry_width);
1939 			break;
1940 		case ICE_AQC_CAPS_RXQS:
1941 			caps->num_rxq = number;
1942 			caps->rxq_first_id = phys_id;
1943 			ice_debug(hw, ICE_DBG_INIT,
1944 				  "%s: num_rxq = %d\n", prefix,
1945 				  caps->num_rxq);
1946 			ice_debug(hw, ICE_DBG_INIT,
1947 				  "%s: rxq_first_id = %d\n", prefix,
1948 				  caps->rxq_first_id);
1949 			break;
1950 		case ICE_AQC_CAPS_TXQS:
1951 			caps->num_txq = number;
1952 			caps->txq_first_id = phys_id;
1953 			ice_debug(hw, ICE_DBG_INIT,
1954 				  "%s: num_txq = %d\n", prefix,
1955 				  caps->num_txq);
1956 			ice_debug(hw, ICE_DBG_INIT,
1957 				  "%s: txq_first_id = %d\n", prefix,
1958 				  caps->txq_first_id);
1959 			break;
1960 		case ICE_AQC_CAPS_MSIX:
1961 			caps->num_msix_vectors = number;
1962 			caps->msix_vector_first_id = phys_id;
1963 			ice_debug(hw, ICE_DBG_INIT,
1964 				  "%s: num_msix_vectors = %d\n", prefix,
1965 				  caps->num_msix_vectors);
1966 			ice_debug(hw, ICE_DBG_INIT,
1967 				  "%s: msix_vector_first_id = %d\n", prefix,
1968 				  caps->msix_vector_first_id);
1969 			break;
1970 		case ICE_AQC_CAPS_NVM_VER:
1971 			break;
1972 		case ICE_AQC_CAPS_NVM_MGMT:
1973 			caps->nvm_unified_update =
1974 				(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
1975 				true : false;
1976 			ice_debug(hw, ICE_DBG_INIT,
1977 				  "%s: nvm_unified_update = %d\n", prefix,
1978 				  caps->nvm_unified_update);
1979 			break;
1980 		case ICE_AQC_CAPS_CEM:
1981 			caps->mgmt_cem = (number == 1);
1982 			ice_debug(hw, ICE_DBG_INIT,
1983 				  "%s: mgmt_cem = %d\n", prefix,
1984 				  caps->mgmt_cem);
1985 			break;
1986 		case ICE_AQC_CAPS_LED:
1987 			if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) {
1988 				caps->led[phys_id] = true;
1989 				caps->led_pin_num++;
1990 			}
1991 			break;
1992 		case ICE_AQC_CAPS_SDP:
1993 			if (phys_id < ICE_MAX_SUPPORTED_GPIO_SDP) {
1994 				caps->sdp[phys_id] = true;
1995 				caps->sdp_pin_num++;
1996 			}
1997 			break;
1998 		case ICE_AQC_CAPS_WR_CSR_PROT:
1999 			caps->wr_csr_prot = number;
2000 			caps->wr_csr_prot |= (u64)logical_id << 32;
2001 			ice_debug(hw, ICE_DBG_INIT,
2002 				  "%s: wr_csr_prot = 0x%llX\n", prefix,
2003 				  (unsigned long long)caps->wr_csr_prot);
2004 			break;
2005 		case ICE_AQC_CAPS_WOL_PROXY:
2006 			caps->num_wol_proxy_fltr = number;
2007 			caps->wol_proxy_vsi_seid = logical_id;
2008 			caps->apm_wol_support = !!(phys_id & ICE_WOL_SUPPORT_M);
2009 			caps->acpi_prog_mthd = !!(phys_id &
2010 						  ICE_ACPI_PROG_MTHD_M);
2011 			caps->proxy_support = !!(phys_id & ICE_PROXY_SUPPORT_M);
2012 			ice_debug(hw, ICE_DBG_INIT,
2013 				  "%s: num_wol_proxy_fltr = %d\n", prefix,
2014 				  caps->num_wol_proxy_fltr);
2015 			ice_debug(hw, ICE_DBG_INIT,
2016 				  "%s: wol_proxy_vsi_seid = %d\n", prefix,
2017 				  caps->wol_proxy_vsi_seid);
2018 			break;
2019 		case ICE_AQC_CAPS_MAX_MTU:
2020 			caps->max_mtu = number;
2021 			ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
2022 				  prefix, caps->max_mtu);
2023 			break;
2024 		default:
2025 			ice_debug(hw, ICE_DBG_INIT,
2026 				  "%s: unknown capability[%d]: 0x%x\n", prefix,
2027 				  i, cap);
2028 			break;
2029 		}
2030 	}
2031 
2032 	ice_print_led_caps(hw, caps, prefix, true);
2033 	ice_print_sdp_caps(hw, caps, prefix, true);
2034 
2035 	/* Re-calculate capabilities that are dependent on the number of
2036 	 * physical ports; i.e. some features are not supported or function
2037 	 * differently on devices with more than 4 ports.
2038 	 */
2039 	if (hw->dev_caps.num_funcs > 4) {
2040 		/* Max 4 TCs per port */
2041 		caps->maxtc = 4;
2042 		ice_debug(hw, ICE_DBG_INIT,
2043 			  "%s: maxtc = %d (based on #ports)\n", prefix,
2044 			  caps->maxtc);
2045 	}
2046 }
2047 
2048 /**
2049  * ice_aq_discover_caps - query function/device capabilities
2050  * @hw: pointer to the HW struct
2051  * @buf: a virtual buffer to hold the capabilities
2052  * @buf_size: Size of the virtual buffer
2053  * @cap_count: if the AQ returns ENOMEM, receives the required cap count
2054  * @opc: capabilities type to discover - pass in the command opcode
2055  * @cd: pointer to command details structure or NULL
2056  *
2057  * Get the function(0x000a)/device(0x000b) capabilities description from
2058  * the firmware.
2059  */
2060 enum ice_status
2061 ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2062 		     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2063 {
2064 	struct ice_aqc_list_caps *cmd;
2065 	struct ice_aq_desc desc;
2066 	enum ice_status status;
2067 
2068 	cmd = &desc.params.get_cap;
2069 
2070 	if (opc != ice_aqc_opc_list_func_caps &&
2071 	    opc != ice_aqc_opc_list_dev_caps)
2072 		return ICE_ERR_PARAM;
2073 
2074 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
2075 
2076 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2077 	if (!status)
2078 		ice_parse_caps(hw, buf, LE32_TO_CPU(cmd->count), opc);
2079 	else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
2080 		*cap_count = LE32_TO_CPU(cmd->count);
2081 	return status;
2082 }
2083 
2084 /**
2085  * ice_discover_caps - get info about the HW
2086  * @hw: pointer to the hardware structure
2087  * @opc: capabilities type to discover - pass in the command opcode
2088  */
2089 static enum ice_status
2090 ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
2091 {
2092 	enum ice_status status;
2093 	u32 cap_count;
2094 	u16 cbuf_len;
2095 	u8 retries;
2096 
2097 	/* The driver doesn't know how many capabilities the device will return,
2098 	 * so the buffer size required isn't known ahead of time. The driver
2099 	 * starts with cbuf_len and if this turns out to be insufficient, the
2100 	 * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
2101 	 * The driver then allocates the buffer based on the count and retries
2102 	 * the operation. So it follows that the retry count is 2.
2103 	 */
2104 #define ICE_GET_CAP_BUF_COUNT	40
2105 #define ICE_GET_CAP_RETRY_COUNT	2
2106 
2107 	cap_count = ICE_GET_CAP_BUF_COUNT;
2108 	retries = ICE_GET_CAP_RETRY_COUNT;
2109 
2110 	do {
2111 		void *cbuf;
2112 
2113 		cbuf_len = (u16)(cap_count *
2114 				 sizeof(struct ice_aqc_list_caps_elem));
2115 		cbuf = ice_malloc(hw, cbuf_len);
2116 		if (!cbuf)
2117 			return ICE_ERR_NO_MEMORY;
2118 
2119 		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
2120 					      opc, NULL);
2121 		ice_free(hw, cbuf);
2122 
2123 		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
2124 			break;
2125 
2126 		/* If ENOMEM is returned, try again with bigger buffer */
2127 	} while (--retries);
2128 
2129 	return status;
2130 }
2131 
2132 /**
2133  * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2134  * @hw: pointer to the hardware structure
2135  */
2136 void ice_set_safe_mode_caps(struct ice_hw *hw)
2137 {
2138 	struct ice_hw_func_caps *func_caps = &hw->func_caps;
2139 	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2140 	u32 valid_func, rxq_first_id, txq_first_id;
2141 	u32 msix_vector_first_id, max_mtu;
2142 	u32 num_funcs;
2143 
2144 	/* cache some func_caps values that should be restored after memset */
2145 	valid_func = func_caps->common_cap.valid_functions;
2146 	txq_first_id = func_caps->common_cap.txq_first_id;
2147 	rxq_first_id = func_caps->common_cap.rxq_first_id;
2148 	msix_vector_first_id = func_caps->common_cap.msix_vector_first_id;
2149 	max_mtu = func_caps->common_cap.max_mtu;
2150 
2151 	/* unset func capabilities */
2152 	memset(func_caps, 0, sizeof(*func_caps));
2153 
2154 	/* restore cached values */
2155 	func_caps->common_cap.valid_functions = valid_func;
2156 	func_caps->common_cap.txq_first_id = txq_first_id;
2157 	func_caps->common_cap.rxq_first_id = rxq_first_id;
2158 	func_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
2159 	func_caps->common_cap.max_mtu = max_mtu;
2160 
2161 	/* one Tx and one Rx queue in safe mode */
2162 	func_caps->common_cap.num_rxq = 1;
2163 	func_caps->common_cap.num_txq = 1;
2164 
2165 	/* two MSIX vectors, one for traffic and one for misc causes */
2166 	func_caps->common_cap.num_msix_vectors = 2;
2167 	func_caps->guar_num_vsi = 1;
2168 
2169 	/* cache some dev_caps values that should be restored after memset */
2170 	valid_func = dev_caps->common_cap.valid_functions;
2171 	txq_first_id = dev_caps->common_cap.txq_first_id;
2172 	rxq_first_id = dev_caps->common_cap.rxq_first_id;
2173 	msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
2174 	max_mtu = dev_caps->common_cap.max_mtu;
2175 	num_funcs = dev_caps->num_funcs;
2176 
2177 	/* unset dev capabilities */
2178 	memset(dev_caps, 0, sizeof(*dev_caps));
2179 
2180 	/* restore cached values */
2181 	dev_caps->common_cap.valid_functions = valid_func;
2182 	dev_caps->common_cap.txq_first_id = txq_first_id;
2183 	dev_caps->common_cap.rxq_first_id = rxq_first_id;
2184 	dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
2185 	dev_caps->common_cap.max_mtu = max_mtu;
2186 	dev_caps->num_funcs = num_funcs;
2187 
2188 	/* one Tx and one Rx queue per function in safe mode */
2189 	dev_caps->common_cap.num_rxq = num_funcs;
2190 	dev_caps->common_cap.num_txq = num_funcs;
2191 
2192 	/* two MSIX vectors per function */
2193 	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2194 }
2195 
2196 /**
2197  * ice_get_caps - get info about the HW
2198  * @hw: pointer to the hardware structure
2199  */
2200 enum ice_status ice_get_caps(struct ice_hw *hw)
2201 {
2202 	enum ice_status status;
2203 
2204 	status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
2205 	if (!status)
2206 		status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);
2207 
2208 	return status;
2209 }
2210 
2211 /**
2212  * ice_aq_manage_mac_write - manage MAC address write command
2213  * @hw: pointer to the HW struct
2214  * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2215  * @flags: flags to control write behavior
2216  * @cd: pointer to command details structure or NULL
2217  *
2218  * This function is used to write MAC address to the NVM (0x0108).
2219  */
2220 enum ice_status
2221 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2222 			struct ice_sq_cd *cd)
2223 {
2224 	struct ice_aqc_manage_mac_write *cmd;
2225 	struct ice_aq_desc desc;
2226 
2227 	cmd = &desc.params.mac_write;
2228 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2229 
2230 	cmd->flags = flags;
2231 	ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_DMA);
2232 
2233 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2234 }
2235 
2236 /**
2237  * ice_aq_clear_pxe_mode
2238  * @hw: pointer to the HW struct
2239  *
2240  * Tell the firmware that the driver is taking over from PXE (0x0110).
2241  */
2242 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2243 {
2244 	struct ice_aq_desc desc;
2245 
2246 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2247 	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2248 
2249 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2250 }
2251 
2252 /**
2253  * ice_clear_pxe_mode - clear pxe operations mode
2254  * @hw: pointer to the HW struct
2255  *
2256  * Make sure all PXE mode settings are cleared, including things
2257  * like descriptor fetch/write-back mode.
2258  */
2259 void ice_clear_pxe_mode(struct ice_hw *hw)
2260 {
2261 	if (ice_check_sq_alive(hw, &hw->adminq))
2262 		ice_aq_clear_pxe_mode(hw);
2263 }
2264 
2265 /**
2266  * ice_aq_set_port_params - set physical port parameters.
2267  * @pi: pointer to the port info struct
2268  * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
2269  * @save_bad_pac: if set, packets with errors are forwarded to the bad frames VSI
2270  * @pad_short_pac: if set, transmit packets smaller than 60 bytes are padded
2271  * @double_vlan: if set, double VLAN is enabled
2272  * @cd: pointer to command details structure or NULL
2273  *
2274  * Set Physical port parameters (0x0203)
2275  */
2276 enum ice_status
2277 ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
2278 		       bool save_bad_pac, bool pad_short_pac, bool double_vlan,
2279 		       struct ice_sq_cd *cd)
2281 {
2282 	struct ice_aqc_set_port_params *cmd;
2283 	struct ice_hw *hw = pi->hw;
2284 	struct ice_aq_desc desc;
2285 	u16 cmd_flags = 0;
2286 
2287 	cmd = &desc.params.set_port_params;
2288 
2289 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
2290 	cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
2291 	if (save_bad_pac)
2292 		cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS;
2293 	if (pad_short_pac)
2294 		cmd_flags |= ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS;
2295 	if (double_vlan)
2296 		cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
2297 	cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
2298 
2299 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2300 }
2301 
2302 /**
2303  * ice_get_link_speed_based_on_phy_type - returns link speed
2304  * @phy_type_low: lower part of phy_type
2305  * @phy_type_high: higher part of phy_type
2306  *
2307  * This helper function converts an entry in the PHY type structure
2308  * [phy_type_low, phy_type_high] to its corresponding link speed.
2309  * Note: exactly one bit should be set in [phy_type_low, phy_type_high],
2310  * as this function converts a single PHY type to its speed.
2311  * If no bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
2312  * If more than one bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be
2313  * returned.
2314  */
2315 static u16
2316 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2317 {
2318 	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2319 	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2320 
2321 	switch (phy_type_low) {
2322 	case ICE_PHY_TYPE_LOW_100BASE_TX:
2323 	case ICE_PHY_TYPE_LOW_100M_SGMII:
2324 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2325 		break;
2326 	case ICE_PHY_TYPE_LOW_1000BASE_T:
2327 	case ICE_PHY_TYPE_LOW_1000BASE_SX:
2328 	case ICE_PHY_TYPE_LOW_1000BASE_LX:
2329 	case ICE_PHY_TYPE_LOW_1000BASE_KX:
2330 	case ICE_PHY_TYPE_LOW_1G_SGMII:
2331 		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2332 		break;
2333 	case ICE_PHY_TYPE_LOW_2500BASE_T:
2334 	case ICE_PHY_TYPE_LOW_2500BASE_X:
2335 	case ICE_PHY_TYPE_LOW_2500BASE_KX:
2336 		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2337 		break;
2338 	case ICE_PHY_TYPE_LOW_5GBASE_T:
2339 	case ICE_PHY_TYPE_LOW_5GBASE_KR:
2340 		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2341 		break;
2342 	case ICE_PHY_TYPE_LOW_10GBASE_T:
2343 	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2344 	case ICE_PHY_TYPE_LOW_10GBASE_SR:
2345 	case ICE_PHY_TYPE_LOW_10GBASE_LR:
2346 	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2347 	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2348 	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2349 		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2350 		break;
2351 	case ICE_PHY_TYPE_LOW_25GBASE_T:
2352 	case ICE_PHY_TYPE_LOW_25GBASE_CR:
2353 	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2354 	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2355 	case ICE_PHY_TYPE_LOW_25GBASE_SR:
2356 	case ICE_PHY_TYPE_LOW_25GBASE_LR:
2357 	case ICE_PHY_TYPE_LOW_25GBASE_KR:
2358 	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2359 	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2360 	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2361 	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2362 		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2363 		break;
2364 	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2365 	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2366 	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2367 	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2368 	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2369 	case ICE_PHY_TYPE_LOW_40G_XLAUI:
2370 		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2371 		break;
2372 	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2373 	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2374 	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2375 	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2376 	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2377 	case ICE_PHY_TYPE_LOW_50G_LAUI2:
2378 	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2379 	case ICE_PHY_TYPE_LOW_50G_AUI2:
2380 	case ICE_PHY_TYPE_LOW_50GBASE_CP:
2381 	case ICE_PHY_TYPE_LOW_50GBASE_SR:
2382 	case ICE_PHY_TYPE_LOW_50GBASE_FR:
2383 	case ICE_PHY_TYPE_LOW_50GBASE_LR:
2384 	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2385 	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2386 	case ICE_PHY_TYPE_LOW_50G_AUI1:
2387 		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2388 		break;
2389 	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2390 	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2391 	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2392 	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2393 	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2394 	case ICE_PHY_TYPE_LOW_100G_CAUI4:
2395 	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2396 	case ICE_PHY_TYPE_LOW_100G_AUI4:
2397 	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2398 	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2399 	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2400 	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2401 	case ICE_PHY_TYPE_LOW_100GBASE_DR:
2402 		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2403 		break;
2404 	default:
2405 		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2406 		break;
2407 	}
2408 
2409 	switch (phy_type_high) {
2410 	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2411 	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2412 	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2413 	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2414 	case ICE_PHY_TYPE_HIGH_100G_AUI2:
2415 		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2416 		break;
2417 	default:
2418 		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2419 		break;
2420 	}
2421 
2422 	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2423 	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2424 		return ICE_AQ_LINK_SPEED_UNKNOWN;
2425 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2426 		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2427 		return ICE_AQ_LINK_SPEED_UNKNOWN;
2428 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2429 		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2430 		return speed_phy_type_low;
2431 	else
2432 		return speed_phy_type_high;
2433 }
2434 
2435 /**
2436  * ice_update_phy_type
2437  * @phy_type_low: pointer to the lower part of phy_type
2438  * @phy_type_high: pointer to the higher part of phy_type
2439  * @link_speeds_bitmap: targeted link speeds bitmap
2440  *
2441  * Note: For the link_speeds_bitmap format, see
2442  * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
2443  * link_speeds_bitmap that includes multiple speeds.
2444  *
2445  * Each entry in the [phy_type_low, phy_type_high] structure represents
2446  * a certain link speed. This helper function turns on the bits in
2447  * [phy_type_low, phy_type_high] that correspond to the speeds set in
2448  * the link_speeds_bitmap input parameter.
2449  */
2450 void
2451 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2452 		    u16 link_speeds_bitmap)
2453 {
2454 	u64 pt_high;
2455 	u64 pt_low;
2456 	int index;
2457 	u16 speed;
2458 
2459 	/* We first check with low part of phy_type */
2460 	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2461 		pt_low = BIT_ULL(index);
2462 		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2463 
2464 		if (link_speeds_bitmap & speed)
2465 			*phy_type_low |= BIT_ULL(index);
2466 	}
2467 
2468 	/* We then check with high part of phy_type */
2469 	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2470 		pt_high = BIT_ULL(index);
2471 		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2472 
2473 		if (link_speeds_bitmap & speed)
2474 			*phy_type_high |= BIT_ULL(index);
2475 	}
2476 }
2477 
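/*
 * Illustrative usage sketch: build the PHY type masks for a request
 * that allows only 10G and 25G link speeds; cfg is assumed to be a
 * struct ice_aqc_set_phy_cfg_data being prepared for
 * ice_aq_set_phy_cfg().
 *
 *	u64 phy_low = 0, phy_high = 0;
 *
 *	ice_update_phy_type(&phy_low, &phy_high,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 *	cfg.phy_type_low = CPU_TO_LE64(phy_low);
 *	cfg.phy_type_high = CPU_TO_LE64(phy_high);
 */
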
2478 /**
2479  * ice_aq_set_phy_cfg
2480  * @hw: pointer to the HW struct
2481  * @pi: port info structure of the interested logical port
2482  * @cfg: structure with PHY configuration data to be set
2483  * @cd: pointer to command details structure or NULL
2484  *
2485  * Set the various PHY configuration parameters supported on the Port.
2486  * One or more of the Set PHY config parameters may be ignored in MFP
2487  * mode, as the PF may not have the privilege to set some of the PHY Config
2488  * parameters. This status is indicated by the command response (0x0601).
2489  */
2490 enum ice_status
2491 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
2492 		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2493 {
2494 	struct ice_aq_desc desc;
2495 	enum ice_status status;
2496 
2497 	if (!cfg)
2498 		return ICE_ERR_PARAM;
2499 
2500 	/* Ensure that only valid bits of cfg->caps can be turned on. */
2501 	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2502 		ice_debug(hw, ICE_DBG_PHY,
2503 			  "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2504 			  cfg->caps);
2505 
2506 		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2507 	}
2508 
2509 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2510 	desc.params.set_phy.lport_num = pi->lport;
2511 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2512 
2513 	ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
2514 		  (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
2515 	ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
2516 		  (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
2517 	ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps);
2518 	ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl_an = 0x%x\n",
2519 		  cfg->low_power_ctrl_an);
2520 	ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap);
2521 	ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
2522 	ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);
2523 
2524 	status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2525 
2526 	if (!status)
2527 		pi->phy.curr_user_phy_cfg = *cfg;
2528 
2529 	return status;
2530 }
2531 
2532 /**
2533  * ice_update_link_info - update status of the HW network link
2534  * @pi: port info structure of the interested logical port
2535  */
2536 enum ice_status ice_update_link_info(struct ice_port_info *pi)
2537 {
2538 	struct ice_link_status *li;
2539 	enum ice_status status;
2540 
2541 	if (!pi)
2542 		return ICE_ERR_PARAM;
2543 
2544 	li = &pi->phy.link_info;
2545 
2546 	status = ice_aq_get_link_info(pi, true, NULL, NULL);
2547 	if (status)
2548 		return status;
2549 
2550 	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2551 		struct ice_aqc_get_phy_caps_data *pcaps;
2552 		struct ice_hw *hw;
2553 
2554 		hw = pi->hw;
2555 		pcaps = (struct ice_aqc_get_phy_caps_data *)
2556 			ice_malloc(hw, sizeof(*pcaps));
2557 		if (!pcaps)
2558 			return ICE_ERR_NO_MEMORY;
2559 
2560 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2561 					     pcaps, NULL);
2562 		if (status == ICE_SUCCESS)
2563 			ice_memcpy(li->module_type, &pcaps->module_type,
2564 				   sizeof(li->module_type),
2565 				   ICE_NONDMA_TO_NONDMA);
2566 
2567 		ice_free(hw, pcaps);
2568 	}
2569 
2570 	return status;
2571 }
2572 
2573 /**
2574  * ice_cache_phy_user_req
2575  * @pi: port information structure
2576  * @cache_data: PHY logging data
2577  * @cache_mode: PHY logging mode
2578  *
2579  * Log the user request on (FC, FEC, SPEED) for later use.
2580  */
2581 static void
2582 ice_cache_phy_user_req(struct ice_port_info *pi,
2583 		       struct ice_phy_cache_mode_data cache_data,
2584 		       enum ice_phy_cache_mode cache_mode)
2585 {
2586 	if (!pi)
2587 		return;
2588 
2589 	switch (cache_mode) {
2590 	case ICE_FC_MODE:
2591 		pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
2592 		break;
2593 	case ICE_SPEED_MODE:
2594 		pi->phy.curr_user_speed_req =
2595 			cache_data.data.curr_user_speed_req;
2596 		break;
2597 	case ICE_FEC_MODE:
2598 		pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
2599 		break;
2600 	default:
2601 		break;
2602 	}
2603 }
2604 
2605 /**
2606  * ice_caps_to_fc_mode
2607  * @caps: PHY capabilities
2608  *
2609  * Convert PHY FC capabilities to ice FC mode
2610  */
2611 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
2612 {
2613 	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
2614 	    caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2615 		return ICE_FC_FULL;
2616 
2617 	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
2618 		return ICE_FC_TX_PAUSE;
2619 
2620 	if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2621 		return ICE_FC_RX_PAUSE;
2622 
2623 	return ICE_FC_NONE;
2624 }
2625 
2626 /**
2627  * ice_caps_to_fec_mode
2628  * @caps: PHY capabilities
2629  * @fec_options: Link FEC options
2630  *
2631  * Convert PHY FEC capabilities to ice FEC mode
2632  */
2633 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
2634 {
2635 	if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
2636 		return ICE_FEC_AUTO;
2637 
2638 	if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2639 			   ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2640 			   ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
2641 			   ICE_AQC_PHY_FEC_25G_KR_REQ))
2642 		return ICE_FEC_BASER;
2643 
2644 	if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2645 			   ICE_AQC_PHY_FEC_25G_RS_544_REQ |
2646 			   ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
2647 		return ICE_FEC_RS;
2648 
2649 	return ICE_FEC_NONE;
2650 }
2651 
2652 /**
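/*
 * Illustrative usage sketch: decode the flow control and FEC modes
 * advertised in a previously retrieved struct
 * ice_aqc_get_phy_caps_data.
 *
 *	enum ice_fc_mode fc = ice_caps_to_fc_mode(pcaps->caps);
 *	enum ice_fec_mode fec = ice_caps_to_fec_mode(pcaps->caps,
 *						     pcaps->link_fec_options);
 */
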
2653  * ice_set_fc
2654  * @pi: port information structure
2655  * @aq_failures: pointer to status code, specific to ice_set_fc routine
2656  * @ena_auto_link_update: enable automatic link update
2657  *
2658  * Set the requested flow control mode.
2659  */
2660 enum ice_status
2661 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
2662 {
2663 	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
2664 	struct ice_phy_cache_mode_data cache_data;
2665 	struct ice_aqc_get_phy_caps_data *pcaps;
2666 	enum ice_status status;
2667 	u8 pause_mask = 0x0;
2668 	struct ice_hw *hw;
2669 
2670 	if (!pi || !aq_failures)
2671 		return ICE_ERR_PARAM;
2672 
2673 	hw = pi->hw;
2674 	*aq_failures = ICE_SET_FC_AQ_FAIL_NONE;
2675 
2676 	/* Cache user FC request */
2677 	cache_data.data.curr_user_fc_req = pi->fc.req_mode;
2678 	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
2679 
2680 	pcaps = (struct ice_aqc_get_phy_caps_data *)
2681 		ice_malloc(hw, sizeof(*pcaps));
2682 	if (!pcaps)
2683 		return ICE_ERR_NO_MEMORY;
2684 
2685 	switch (pi->fc.req_mode) {
2686 	case ICE_FC_AUTO:
2687 		/* Query the value of FC that both the NIC and attached media
2688 		 * can do.
2689 		 */
2690 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2691 					     pcaps, NULL);
2692 		if (status) {
2693 			*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2694 			goto out;
2695 		}
2696 
2697 		pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2698 		pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2699 		break;
2700 	case ICE_FC_FULL:
2701 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2702 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2703 		break;
2704 	case ICE_FC_RX_PAUSE:
2705 		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2706 		break;
2707 	case ICE_FC_TX_PAUSE:
2708 		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2709 		break;
2710 	default:
2711 		break;
2712 	}
2713 
2714 	/* Get the current PHY config */
2715 	ice_memset(pcaps, 0, sizeof(*pcaps), ICE_NONDMA_MEM);
2716 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
2717 				     NULL);
2718 	if (status) {
2719 		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2720 		goto out;
2721 	}
2722 
2723 	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
2724 
2725 	/* clear the old pause settings */
2726 	cfg.caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
2727 		      ICE_AQC_PHY_EN_RX_LINK_PAUSE);
2728 
2729 	/* set the new capabilities */
2730 	cfg.caps |= pause_mask;
2731 
2732 	/* If the capabilities have changed, then set the new config */
2733 	if (cfg.caps != pcaps->caps) {
2734 		int retry_count, retry_max = 10;
2735 
2736 		/* Auto restart link so settings take effect */
2737 		if (ena_auto_link_update)
2738 			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2739 
2740 		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
2741 		if (status) {
2742 			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
2743 			goto out;
2744 		}
2745 
2746 		/* Update the link info
2747 		 * It sometimes takes a really long time for link to
2748 		 * come back from the atomic reset. Thus, we wait a
2749 		 * little bit.
2750 		 */
2751 		for (retry_count = 0; retry_count < retry_max; retry_count++) {
2752 			status = ice_update_link_info(pi);
2753 
2754 			if (status == ICE_SUCCESS)
2755 				break;
2756 
2757 			ice_msec_delay(100, true);
2758 		}
2759 
2760 		if (status)
2761 			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
2762 	}
2763 
2764 out:
2765 	ice_free(hw, pcaps);
2766 	return status;
2767 }
2768 
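/*
 * Illustrative usage sketch: request full flow control and let
 * firmware restart the link so the new setting takes effect.
 *
 *	u8 aq_failures = 0;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	if (ice_set_fc(pi, &aq_failures, true))
 *		... inspect aq_failures against ICE_SET_FC_AQ_FAIL_* ...
 */
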
2769 /**
2770  * ice_phy_caps_equals_cfg
2771  * @phy_caps: PHY capabilities
2772  * @phy_cfg: PHY configuration
2773  *
2774  * Helper function to determine if the PHY capabilities match the PHY
2775  * configuration.
2776  */
2777 bool
2778 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
2779 			struct ice_aqc_set_phy_cfg_data *phy_cfg)
2780 {
2781 	u8 caps_mask, cfg_mask;
2782 
2783 	if (!phy_caps || !phy_cfg)
2784 		return false;
2785 
2786 	/* These bits are not common between capabilities and configuration.
2787 	 * Do not use them to determine equality.
2788 	 */
2789 	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
2790 					      ICE_AQC_PHY_EN_MOD_QUAL);
2791 	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2792 
2793 	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
2794 	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
2795 	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
2796 	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
2797 	    phy_caps->eee_cap != phy_cfg->eee_cap ||
2798 	    phy_caps->eeer_value != phy_cfg->eeer_value ||
2799 	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
2800 		return false;
2801 
2802 	return true;
2803 }
2804 
2805 /**
2806  * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
2807  * @pi: port information structure
2808  * @caps: PHY ability structure to copy data from
2809  * @cfg: PHY configuration structure to copy data to
2810  *
2811  * Helper function to copy AQC PHY get ability data to PHY set configuration
2812  * data structure
2813  */
2814 void
2815 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
2816 			 struct ice_aqc_get_phy_caps_data *caps,
2817 			 struct ice_aqc_set_phy_cfg_data *cfg)
2818 {
2819 	if (!pi || !caps || !cfg)
2820 		return;
2821 
2822 	ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
2823 	cfg->phy_type_low = caps->phy_type_low;
2824 	cfg->phy_type_high = caps->phy_type_high;
2825 	cfg->caps = caps->caps;
2826 	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
2827 	cfg->eee_cap = caps->eee_cap;
2828 	cfg->eeer_value = caps->eeer_value;
2829 	cfg->link_fec_opt = caps->link_fec_options;
2830 	cfg->module_compliance_enforcement =
2831 		caps->module_compliance_enforcement;
2832 
2833 	if (ice_fw_supports_link_override(pi->hw)) {
2834 		struct ice_link_default_override_tlv tlv;
2835 
2836 		if (ice_get_link_default_override(&tlv, pi))
2837 			return;
2838 
2839 		if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)
2840 			cfg->module_compliance_enforcement |=
2841 				ICE_LINK_OVERRIDE_STRICT_MODE;
2842 	}
2843 }
2844 
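/*
 * Illustrative usage sketch: the usual flow for changing PHY settings
 * is to read the current abilities, convert them into a configuration,
 * adjust the fields of interest, and write the result back.
 *
 *	ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, NULL);
 *	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
 *	... modify cfg, e.g. via ice_cfg_phy_fec() below ...
 *	ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
 */
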
2845 /**
2846  * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
2847  * @pi: port information structure
2848  * @cfg: PHY configuration data to set FEC mode
2849  * @fec: FEC mode to configure
2850  */
2851 enum ice_status
2852 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2853 		enum ice_fec_mode fec)
2854 {
2855 	struct ice_aqc_get_phy_caps_data *pcaps;
2856 	enum ice_status status = ICE_SUCCESS;
2857 	struct ice_hw *hw;
2858 
2859 	if (!pi || !cfg)
2860 		return ICE_ERR_BAD_PTR;
2861 
2862 	hw = pi->hw;
2863 
2864 	pcaps = (struct ice_aqc_get_phy_caps_data *)
2865 		ice_malloc(hw, sizeof(*pcaps));
2866 	if (!pcaps)
2867 		return ICE_ERR_NO_MEMORY;
2868 
2869 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
2870 				     NULL);
2871 	if (status)
2872 		goto out;
2873 
2874 	switch (fec) {
2875 	case ICE_FEC_BASER:
2876 		/* Clear the RS bits; AND in the BASE-R ability
2877 		 * bits and OR in the request bits.
2878 		 */
2879 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2880 			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
2881 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2882 			ICE_AQC_PHY_FEC_25G_KR_REQ;
2883 		break;
2884 	case ICE_FEC_RS:
2885 		/* Clear the BASE-R bits; AND in the RS ability
2886 		 * bits and OR in the request bits.
2887 		 */
2888 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
2889 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2890 			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
2891 		break;
2892 	case ICE_FEC_NONE:
2893 		/* Clear all FEC option bits. */
2894 		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
2895 		break;
2896 	case ICE_FEC_AUTO:
2897 		/* AND auto FEC bit, and all caps bits. */
2898 		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
2899 		cfg->link_fec_opt |= pcaps->link_fec_options;
2900 		break;
2901 	default:
2902 		status = ICE_ERR_PARAM;
2903 		break;
2904 	}
2905 
2906 	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) {
2907 		struct ice_link_default_override_tlv tlv;
2908 
2909 		if (ice_get_link_default_override(&tlv, pi))
2910 			goto out;
2911 
2912 		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
2913 		    (tlv.options & ICE_LINK_OVERRIDE_EN))
2914 			cfg->link_fec_opt = tlv.fec_options;
2915 	}
2916 
2917 out:
2918 	ice_free(hw, pcaps);
2919 
2920 	return status;
2921 }
2922 
2923 /**
2924  * ice_get_link_status - get status of the HW network link
2925  * @pi: port information structure
2926  * @link_up: pointer to bool (true/false = linkup/linkdown)
2927  *
2928  * Variable link_up is true if link is up, false if link is down.
2929  * The variable link_up is invalid if status is non zero. As a
2930  * result of this call, link status reporting becomes enabled
2931  */
2932 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
2933 {
2934 	struct ice_phy_info *phy_info;
2935 	enum ice_status status = ICE_SUCCESS;
2936 
2937 	if (!pi || !link_up)
2938 		return ICE_ERR_PARAM;
2939 
2940 	phy_info = &pi->phy;
2941 
2942 	if (phy_info->get_link_info) {
2943 		status = ice_update_link_info(pi);
2944 
2945 		if (status)
2946 			ice_debug(pi->hw, ICE_DBG_LINK,
2947 				  "get link status error, status = %d\n",
2948 				  status);
2949 	}
2950 
2951 	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
2952 
2953 	return status;
2954 }
2955 
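/*
 * Illustrative usage sketch: poll the current link state.
 *
 *	bool link_up;
 *
 *	if (!ice_get_link_status(pi, &link_up) && link_up)
 *		... link is up ...
 */
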
2956 /**
2957  * ice_aq_set_link_restart_an
2958  * @pi: pointer to the port information structure
2959  * @ena_link: if true: enable link, if false: disable link
2960  * @cd: pointer to command details structure or NULL
2961  *
2962  * Sets up the link and restarts the Auto-Negotiation over the link.
2963  */
2964 enum ice_status
2965 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
2966 			   struct ice_sq_cd *cd)
2967 {
2968 	struct ice_aqc_restart_an *cmd;
2969 	struct ice_aq_desc desc;
2970 
2971 	cmd = &desc.params.restart_an;
2972 
2973 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
2974 
2975 	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
2976 	cmd->lport_num = pi->lport;
2977 	if (ena_link)
2978 		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
2979 	else
2980 		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
2981 
2982 	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
2983 }
2984 
2985 /**
2986  * ice_aq_set_event_mask
2987  * @hw: pointer to the HW struct
2988  * @port_num: port number of the physical function
2989  * @mask: event mask to be set
2990  * @cd: pointer to command details structure or NULL
2991  *
2992  * Set event mask (0x0613)
2993  */
2994 enum ice_status
2995 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
2996 		      struct ice_sq_cd *cd)
2997 {
2998 	struct ice_aqc_set_event_mask *cmd;
2999 	struct ice_aq_desc desc;
3000 
3001 	cmd = &desc.params.set_event_mask;
3002 
3003 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3004 
3005 	cmd->lport_num = port_num;
3006 
3007 	cmd->event_mask = CPU_TO_LE16(mask);
3008 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3009 }
3010 
3011 /**
3012  * ice_aq_set_mac_loopback
3013  * @hw: pointer to the HW struct
3014  * @ena_lpbk: Enable or Disable loopback
3015  * @cd: pointer to command details structure or NULL
3016  *
3017  * Enable/disable loopback on a given port
3018  */
3019 enum ice_status
3020 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3021 {
3022 	struct ice_aqc_set_mac_lb *cmd;
3023 	struct ice_aq_desc desc;
3024 
3025 	cmd = &desc.params.set_mac_lb;
3026 
3027 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3028 	if (ena_lpbk)
3029 		cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3030 
3031 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3032 }
3033 
3034 /**
3035  * ice_aq_set_port_id_led
3036  * @pi: pointer to the port information
3037  * @is_orig_mode: is this LED set to original mode (by the net-list)
3038  * @cd: pointer to command details structure or NULL
3039  *
3040  * Set LED value for the given port (0x06e9)
3041  */
3042 enum ice_status
3043 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3044 		       struct ice_sq_cd *cd)
3045 {
3046 	struct ice_aqc_set_port_id_led *cmd;
3047 	struct ice_hw *hw = pi->hw;
3048 	struct ice_aq_desc desc;
3049 
3050 	cmd = &desc.params.set_port_id_led;
3051 
3052 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3053 
3054 	if (is_orig_mode)
3055 		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3056 	else
3057 		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3058 
3059 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3060 }
3061 
3062 /**
3063  * ice_aq_sff_eeprom
3064  * @hw: pointer to the HW struct
3065  * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3066  * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
3067  * @mem_addr: I2C offset; lower 8 bits hold the address, upper 8 bits must be zero.
3068  * @page: QSFP page
3069  * @set_page: set or ignore the page
3070  * @data: pointer to data buffer to be read/written to the I2C device.
3071  * @length: 1-16 for a read, 1 for a write.
3072  * @write: false for a read, true for a write.
3073  * @cd: pointer to command details structure or NULL
3074  *
3075  * Read/Write SFF EEPROM (0x06EE)
3076  */
3077 enum ice_status
3078 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3079 		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3080 		  bool write, struct ice_sq_cd *cd)
3081 {
3082 	struct ice_aqc_sff_eeprom *cmd;
3083 	struct ice_aq_desc desc;
3084 	enum ice_status status;
3085 
3086 	if (!data || (mem_addr & 0xff00))
3087 		return ICE_ERR_PARAM;
3088 
3089 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3090 	cmd = &desc.params.read_write_sff_param;
3091 	desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF);
3092 	cmd->lport_num = (u8)(lport & 0xff);
3093 	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3094 	cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
3095 					 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3096 					((set_page <<
3097 					  ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3098 					 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3099 	cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
3100 	cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3101 	if (write)
3102 		cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
3103 
3104 	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
3105 	return status;
3106 }
3107 
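/*
 * Illustrative usage sketch: read the one-byte SFF identifier at
 * offset 0x00 of the module behind the default logical port, using the
 * conventional 0xA0 EEPROM address and without forcing a page change.
 *
 *	enum ice_status status;
 *	u8 id;
 *
 *	status = ice_aq_sff_eeprom(hw, 0, 0xA0, 0x00, 0, 0, &id, 1,
 *				   false, NULL);
 */
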
3108 /**
3109  * __ice_aq_get_set_rss_lut
3110  * @hw: pointer to the hardware structure
3111  * @vsi_id: VSI FW index
3112  * @lut_type: LUT table type
3113  * @lut: pointer to the LUT buffer provided by the caller
3114  * @lut_size: size of the LUT buffer
3115  * @glob_lut_idx: global LUT index
3116  * @set: set true to set the table, false to get the table
3117  *
3118  * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
3119  */
3120 static enum ice_status
3121 __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
3122 			 u16 lut_size, u8 glob_lut_idx, bool set)
3123 {
3124 	struct ice_aqc_get_set_rss_lut *cmd_resp;
3125 	struct ice_aq_desc desc;
3126 	enum ice_status status;
3127 	u16 flags = 0;
3128 
3129 	cmd_resp = &desc.params.get_set_rss_lut;
3130 
3131 	if (set) {
3132 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
3133 		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3134 	} else {
3135 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
3136 	}
3137 
3138 	cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3139 					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
3140 					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
3141 				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);
3142 
3143 	switch (lut_type) {
3144 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
3145 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
3146 	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
3147 		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
3148 			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
3149 		break;
3150 	default:
3151 		status = ICE_ERR_PARAM;
3152 		goto ice_aq_get_set_rss_lut_exit;
3153 	}
3154 
3155 	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
3156 		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
3157 			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
3158 
3159 		if (!set)
3160 			goto ice_aq_get_set_rss_lut_send;
3161 	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3162 		if (!set)
3163 			goto ice_aq_get_set_rss_lut_send;
3164 	} else {
3165 		goto ice_aq_get_set_rss_lut_send;
3166 	}
3167 
3168 	/* LUT size is only valid for Global and PF table types */
3169 	switch (lut_size) {
3170 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
3171 		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
3172 			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3173 			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3174 		break;
3175 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
3176 		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
3177 			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3178 			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3179 		break;
3180 	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
3181 		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3182 			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
3183 				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3184 				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3185 			break;
3186 		}
3187 		/* fall-through */
3188 	default:
3189 		status = ICE_ERR_PARAM;
3190 		goto ice_aq_get_set_rss_lut_exit;
3191 	}
3192 
3193 ice_aq_get_set_rss_lut_send:
3194 	cmd_resp->flags = CPU_TO_LE16(flags);
3195 	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
3196 
3197 ice_aq_get_set_rss_lut_exit:
3198 	return status;
3199 }
3200 
3201 /**
3202  * ice_aq_get_rss_lut
3203  * @hw: pointer to the hardware structure
3204  * @vsi_handle: software VSI handle
3205  * @lut_type: LUT table type
3206  * @lut: pointer to the LUT buffer provided by the caller
3207  * @lut_size: size of the LUT buffer
3208  *
3209  * get the RSS lookup table, PF or VSI type
3210  */
3211 enum ice_status
3212 ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
3213 		   u8 *lut, u16 lut_size)
3214 {
3215 	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3216 		return ICE_ERR_PARAM;
3217 
3218 	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3219 					lut_type, lut, lut_size, 0, false);
3220 }
3221 
3222 /**
3223  * ice_aq_set_rss_lut
3224  * @hw: pointer to the hardware structure
3225  * @vsi_handle: software VSI handle
3226  * @lut_type: LUT table type
3227  * @lut: pointer to the LUT buffer provided by the caller
3228  * @lut_size: size of the LUT buffer
3229  *
3230  * set the RSS lookup table, PF or VSI type
3231  */
3232 enum ice_status
3233 ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
3234 		   u8 *lut, u16 lut_size)
3235 {
3236 	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3237 		return ICE_ERR_PARAM;
3238 
3239 	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3240 					lut_type, lut, lut_size, 0, true);
3241 }
3242 
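/*
 * Illustrative usage sketch: program a PF RSS LUT that spreads hash
 * results over four queues; the 512-entry size reuses the
 * ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 value accepted above.
 *
 *	u8 lut[ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512];
 *	u16 i;
 *
 *	for (i = 0; i < sizeof(lut); i++)
 *		lut[i] = i % 4;
 *	status = ice_aq_set_rss_lut(hw, vsi_handle,
 *				    ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
 *				    lut, sizeof(lut));
 */
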
3243 /**
3244  * __ice_aq_get_set_rss_key
3245  * @hw: pointer to the HW struct
3246  * @vsi_id: VSI FW index
3247  * @key: pointer to key info struct
3248  * @set: set true to set the key, false to get the key
3249  *
3250  * get (0x0B04) or set (0x0B02) the RSS key per VSI
3251  */
3252 static enum ice_status
3253 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3254 			 struct ice_aqc_get_set_rss_keys *key,
3255 			 bool set)
3256 {
3257 	struct ice_aqc_get_set_rss_key *cmd_resp;
3258 	u16 key_size = sizeof(*key);
3259 	struct ice_aq_desc desc;
3260 
3261 	cmd_resp = &desc.params.get_set_rss_key;
3262 
3263 	if (set) {
3264 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3265 		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3266 	} else {
3267 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3268 	}
3269 
3270 	cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3271 					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3272 					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3273 				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3274 
3275 	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3276 }
3277 
3278 /**
3279  * ice_aq_get_rss_key
3280  * @hw: pointer to the HW struct
3281  * @vsi_handle: software VSI handle
3282  * @key: pointer to key info struct
3283  *
3284  * get the RSS key per VSI
3285  */
3286 enum ice_status
3287 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
3288 		   struct ice_aqc_get_set_rss_keys *key)
3289 {
3290 	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3291 		return ICE_ERR_PARAM;
3292 
3293 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3294 					key, false);
3295 }
3296 
3297 /**
3298  * ice_aq_set_rss_key
3299  * @hw: pointer to the HW struct
3300  * @vsi_handle: software VSI handle
3301  * @keys: pointer to key info struct
3302  *
3303  * set the RSS key per VSI
3304  */
3305 enum ice_status
3306 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
3307 		   struct ice_aqc_get_set_rss_keys *keys)
3308 {
3309 	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3310 		return ICE_ERR_PARAM;
3311 
3312 	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3313 					keys, true);
3314 }
3315 
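/*
 * Illustrative usage sketch: install a caller-chosen RSS key on a VSI.
 * The key structure is zeroed here only to keep the example short; a
 * real caller fills it with suitably random bytes first.
 *
 *	struct ice_aqc_get_set_rss_keys keys = { 0 };
 *	enum ice_status status;
 *
 *	status = ice_aq_set_rss_key(hw, vsi_handle, &keys);
 */
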
3316 /**
3317  * ice_aq_add_lan_txq
3318  * @hw: pointer to the hardware structure
3319  * @num_qgrps: Number of added queue groups
3320  * @qg_list: list of queue groups to be added
3321  * @buf_size: size of buffer for indirect command
3322  * @cd: pointer to command details structure or NULL
3323  *
3324  * Add Tx LAN queue (0x0C30)
3325  *
3326  * NOTE:
3327  * Prior to calling Add Tx LAN queue, initialize the following as
3328  * part of the Tx queue context: the Completion queue ID (if the
3329  * queue uses a completion queue), the Quanta profile, the Cache
3330  * profile, and the Packet shaper profile.
3331  *
3332  * After the Add Tx LAN queue AQ command completes, interrupts
3333  * should be associated with the specific queues; associating a Tx
3334  * queue with a doorbell queue is not part of the Add LAN Tx queue
3335  * flow.
3336  */
3337 enum ice_status
3338 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3339 		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
3340 		   struct ice_sq_cd *cd)
3341 {
3342 	u16 i, sum_header_size, sum_q_size = 0;
3343 	struct ice_aqc_add_tx_qgrp *list;
3344 	struct ice_aqc_add_txqs *cmd;
3345 	struct ice_aq_desc desc;
3346 
3347 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3348 
3349 	cmd = &desc.params.add_txqs;
3350 
3351 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
3352 
3353 	if (!qg_list)
3354 		return ICE_ERR_PARAM;
3355 
3356 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3357 		return ICE_ERR_PARAM;
3358 
3359 	sum_header_size = num_qgrps *
3360 		(sizeof(*qg_list) - sizeof(*qg_list->txqs));
3361 
3362 	list = qg_list;
3363 	for (i = 0; i < num_qgrps; i++) {
3364 		struct ice_aqc_add_txqs_perq *q = list->txqs;
3365 
3366 		sum_q_size += list->num_txqs * sizeof(*q);
3367 		list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
3368 	}
3369 
3370 	if (buf_size != (sum_header_size + sum_q_size))
3371 		return ICE_ERR_PARAM;
3372 
3373 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3374 
3375 	cmd->num_qgrps = num_qgrps;
3376 
3377 	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3378 }
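
/*
 * Example (editorial sketch): the buffer layout that the buf_size check
 * above enforces.  Each group contributes a header (the struct minus its
 * flexible txqs[] element) plus one ice_aqc_add_txqs_perq entry per
 * queue; this helper mirrors the validation loop in ice_aq_add_lan_txq().
 */
static inline u16
ice_example_add_txq_buf_size(struct ice_aqc_add_tx_qgrp *qg_list,
			     u8 num_qgrps)
{
	u16 hdr_size = sizeof(*qg_list) - sizeof(*qg_list->txqs);
	struct ice_aqc_add_tx_qgrp *grp = qg_list;
	u16 sz = 0;
	u8 i;

	for (i = 0; i < num_qgrps; i++) {
		struct ice_aqc_add_txqs_perq *q = grp->txqs;

		/* header for this group, then its per-queue entries */
		sz += hdr_size + grp->num_txqs * sizeof(*q);
		/* the next group starts right after the last queue entry */
		grp = (struct ice_aqc_add_tx_qgrp *)(q + grp->num_txqs);
	}
	return sz;
}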
3379 
3380 /**
3381  * ice_aq_dis_lan_txq
3382  * @hw: pointer to the hardware structure
3383  * @num_qgrps: number of groups in the list
3384  * @qg_list: the list of groups to disable
3385  * @buf_size: the total size of the qg_list buffer in bytes
3386  * @rst_src: if called due to reset, specifies the reset source
3387  * @vmvf_num: the relative VM or VF number that is undergoing the reset
3388  * @cd: pointer to command details structure or NULL
3389  *
3390  * Disable LAN Tx queue (0x0C31)
3391  */
3392 static enum ice_status
3393 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3394 		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
3395 		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
3396 		   struct ice_sq_cd *cd)
3397 {
3398 	struct ice_aqc_dis_txqs *cmd;
3399 	struct ice_aq_desc desc;
3400 	enum ice_status status;
3401 	u16 i, sz = 0;
3402 
3403 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3404 	cmd = &desc.params.dis_txqs;
3405 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
3406 
3407 	/* qg_list can be NULL only in VM/VF reset flow */
3408 	if (!qg_list && !rst_src)
3409 		return ICE_ERR_PARAM;
3410 
3411 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3412 		return ICE_ERR_PARAM;
3413 
3414 	cmd->num_entries = num_qgrps;
3415 
3416 	cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
3417 					    ICE_AQC_Q_DIS_TIMEOUT_M);
3418 
3419 	switch (rst_src) {
3420 	case ICE_VM_RESET:
3421 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
3422 		cmd->vmvf_and_timeout |=
3423 			CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
3424 		break;
3425 	case ICE_VF_RESET:
3426 		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
3427 		/* In this case, FW expects vmvf_num to be absolute VF ID */
3428 		cmd->vmvf_and_timeout |=
3429 			CPU_TO_LE16((vmvf_num + hw->func_caps.vf_base_id) &
3430 				    ICE_AQC_Q_DIS_VMVF_NUM_M);
3431 		break;
3432 	case ICE_NO_RESET:
3433 	default:
3434 		break;
3435 	}
3436 
3437 	/* flush the pipe on timeout */
3438 	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
3439 	/* If no queue group info, we are in a reset flow. Issue the AQ */
3440 	if (!qg_list)
3441 		goto do_aq;
3442 
3443 	/* set the RD bit to indicate that the command buffer is provided by
3444 	 * the driver and needs to be read by the firmware
3445 	 */
3446 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3447 
3448 	for (i = 0; i < num_qgrps; ++i) {
3449 		/* Calculate the size taken up by the queue IDs in this group */
3450 		sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
3451 
3452 		/* Add the size of the group header */
3453 		sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
3454 
3455 		/* If the number of queues is even, add 2 bytes of padding */
3456 		if ((qg_list[i].num_qs % 2) == 0)
3457 			sz += 2;
3458 	}
3459 
3460 	if (buf_size != sz)
3461 		return ICE_ERR_PARAM;
3462 
3463 do_aq:
3464 	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3465 	if (status) {
3466 		if (!qg_list)
3467 			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
3468 				  vmvf_num, hw->adminq.sq_last_status);
3469 		else
3470 			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
3471 				  LE16_TO_CPU(qg_list[0].q_id[0]),
3472 				  hw->adminq.sq_last_status);
3473 	}
3474 	return status;
3475 }
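
/*
 * Example (editorial sketch): the per-group size rule enforced by the
 * loop above, factored out for a single ice_aqc_dis_txq_item.  Calling
 * this for each group and summing the results reproduces the buf_size
 * that ice_aq_dis_lan_txq() expects.
 */
static inline u16
ice_example_dis_txq_item_size(struct ice_aqc_dis_txq_item *item)
{
	u16 sz;

	/* the 16-bit queue IDs in this group */
	sz = item->num_qs * sizeof(item->q_id);
	/* the group header (struct minus the q_id storage) */
	sz += sizeof(*item) - sizeof(item->q_id);
	/* an even number of queue IDs needs 2 bytes of padding */
	if ((item->num_qs % 2) == 0)
		sz += 2;

	return sz;
}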
3476 
3477 /**
3478  * ice_aq_move_recfg_lan_txq
3479  * @hw: pointer to the hardware structure
3480  * @num_qs: number of queues to move/reconfigure
3481  * @is_move: true if this operation involves node movement
3482  * @is_tc_change: true if this operation involves a TC change
3483  * @subseq_call: true if this operation is a subsequent call
3484  * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
3485  * @timeout: timeout in units of 100 usec (valid values 0-50)
3486  * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
3487  * @buf: struct containing src/dest TEID and per-queue info
3488  * @buf_size: size of buffer for indirect command
3489  * @txqs_moved: out param, number of queues successfully moved
3490  * @cd: pointer to command details structure or NULL
3491  *
3492  * Move / Reconfigure Tx LAN queues (0x0C32)
3493  */
3494 enum ice_status
3495 ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
3496 			  bool is_tc_change, bool subseq_call, bool flush_pipe,
3497 			  u8 timeout, u32 *blocked_cgds,
3498 			  struct ice_aqc_move_txqs_data *buf, u16 buf_size,
3499 			  u8 *txqs_moved, struct ice_sq_cd *cd)
3500 {
3501 	struct ice_aqc_move_txqs *cmd;
3502 	struct ice_aq_desc desc;
3503 	enum ice_status status;
3504 
3505 	cmd = &desc.params.move_txqs;
3506 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
3507 
3508 #define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
3509 	if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
3510 		return ICE_ERR_PARAM;
3511 
3512 	if (is_tc_change && !flush_pipe && !blocked_cgds)
3513 		return ICE_ERR_PARAM;
3514 
3515 	if (!is_move && !is_tc_change)
3516 		return ICE_ERR_PARAM;
3517 
3518 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3519 
3520 	if (is_move)
3521 		cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;
3522 
3523 	if (is_tc_change)
3524 		cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;
3525 
3526 	if (subseq_call)
3527 		cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;
3528 
3529 	if (flush_pipe)
3530 		cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;
3531 
3532 	cmd->num_qs = num_qs;
3533 	cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
3534 			ICE_AQC_Q_CMD_TIMEOUT_M);
3535 
3536 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3537 
3538 	if (!status && txqs_moved)
3539 		*txqs_moved = cmd->num_qs;
3540 
3541 	if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
3542 	    is_tc_change && !flush_pipe)
3543 		*blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);
3544 
3545 	return status;
3546 }
3547 
3548 /* End of FW Admin Queue command wrappers */
3549 
3550 /**
3551  * ice_write_byte - write a byte to a packed context structure
3552  * @src_ctx:  the context structure to read from
3553  * @dest_ctx: the context to be written to
3554  * @ce_info:  a description of the struct to be filled
3555  */
3556 static void
3557 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3558 {
3559 	u8 src_byte, dest_byte, mask;
3560 	u8 *from, *dest;
3561 	u16 shift_width;
3562 
3563 	/* copy from the next struct field */
3564 	from = src_ctx + ce_info->offset;
3565 
3566 	/* prepare the bits and mask */
3567 	shift_width = ce_info->lsb % 8;
3568 	mask = (u8)(BIT(ce_info->width) - 1);
3569 
3570 	src_byte = *from;
3571 	src_byte &= mask;
3572 
3573 	/* shift to correct alignment */
3574 	mask <<= shift_width;
3575 	src_byte <<= shift_width;
3576 
3577 	/* get the current bits from the target bit string */
3578 	dest = dest_ctx + (ce_info->lsb / 8);
3579 
3580 	ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
3581 
3582 	dest_byte &= ~mask;	/* get the bits not changing */
3583 	dest_byte |= src_byte;	/* add in the new bits */
3584 
3585 	/* put it all back */
3586 	ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
3587 }
3588 
3589 /**
3590  * ice_write_word - write a word to a packed context structure
3591  * @src_ctx:  the context structure to read from
3592  * @dest_ctx: the context to be written to
3593  * @ce_info:  a description of the struct to be filled
3594  */
3595 static void
3596 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3597 {
3598 	u16 src_word, mask;
3599 	__le16 dest_word;
3600 	u8 *from, *dest;
3601 	u16 shift_width;
3602 
3603 	/* copy from the next struct field */
3604 	from = src_ctx + ce_info->offset;
3605 
3606 	/* prepare the bits and mask */
3607 	shift_width = ce_info->lsb % 8;
3608 	mask = BIT(ce_info->width) - 1;
3609 
3610 	/* don't swizzle the bits until after the mask because the mask bits
3611 	 * will be in a different bit position on big endian machines
3612 	 */
3613 	src_word = *(u16 *)from;
3614 	src_word &= mask;
3615 
3616 	/* shift to correct alignment */
3617 	mask <<= shift_width;
3618 	src_word <<= shift_width;
3619 
3620 	/* get the current bits from the target bit string */
3621 	dest = dest_ctx + (ce_info->lsb / 8);
3622 
3623 	ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
3624 
3625 	dest_word &= ~(CPU_TO_LE16(mask));	/* get the bits not changing */
3626 	dest_word |= CPU_TO_LE16(src_word);	/* add in the new bits */
3627 
3628 	/* put it all back */
3629 	ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
3630 }
3631 
3632 /**
3633  * ice_write_dword - write a dword to a packed context structure
3634  * @src_ctx:  the context structure to read from
3635  * @dest_ctx: the context to be written to
3636  * @ce_info:  a description of the struct to be filled
3637  */
3638 static void
3639 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3640 {
3641 	u32 src_dword, mask;
3642 	__le32 dest_dword;
3643 	u8 *from, *dest;
3644 	u16 shift_width;
3645 
3646 	/* copy from the next struct field */
3647 	from = src_ctx + ce_info->offset;
3648 
3649 	/* prepare the bits and mask */
3650 	shift_width = ce_info->lsb % 8;
3651 
3652 	/* if the field width is exactly 32 on an x86 machine, then the shift
3653 	 * operation will not work because the SHL instruction's shift count is
3654 	 * masked to 5 bits, so the shift would do nothing
3655 	 */
3656 	if (ce_info->width < 32)
3657 		mask = BIT(ce_info->width) - 1;
3658 	else
3659 		mask = (u32)~0;
3660 
3661 	/* don't swizzle the bits until after the mask because the mask bits
3662 	 * will be in a different bit position on big endian machines
3663 	 */
3664 	src_dword = *(u32 *)from;
3665 	src_dword &= mask;
3666 
3667 	/* shift to correct alignment */
3668 	mask <<= shift_width;
3669 	src_dword <<= shift_width;
3670 
3671 	/* get the current bits from the target bit string */
3672 	dest = dest_ctx + (ce_info->lsb / 8);
3673 
3674 	ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
3675 
3676 	dest_dword &= ~(CPU_TO_LE32(mask));	/* get the bits not changing */
3677 	dest_dword |= CPU_TO_LE32(src_dword);	/* add in the new bits */
3678 
3679 	/* put it all back */
3680 	ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
3681 }
3682 
3683 /**
3684  * ice_write_qword - write a qword to a packed context structure
3685  * @src_ctx:  the context structure to read from
3686  * @dest_ctx: the context to be written to
3687  * @ce_info:  a description of the struct to be filled
3688  */
3689 static void
3690 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3691 {
3692 	u64 src_qword, mask;
3693 	__le64 dest_qword;
3694 	u8 *from, *dest;
3695 	u16 shift_width;
3696 
3697 	/* copy from the next struct field */
3698 	from = src_ctx + ce_info->offset;
3699 
3700 	/* prepare the bits and mask */
3701 	shift_width = ce_info->lsb % 8;
3702 
3703 	/* if the field width is exactly 64 on an x86 machine, then the shift
3704 	 * operation will not work because the SHL instruction's shift count is
3705 	 * masked to 6 bits, so the shift would do nothing
3706 	 */
3707 	if (ce_info->width < 64)
3708 		mask = BIT_ULL(ce_info->width) - 1;
3709 	else
3710 		mask = (u64)~0;
3711 
3712 	/* don't swizzle the bits until after the mask because the mask bits
3713 	 * will be in a different bit position on big endian machines
3714 	 */
3715 	src_qword = *(u64 *)from;
3716 	src_qword &= mask;
3717 
3718 	/* shift to correct alignment */
3719 	mask <<= shift_width;
3720 	src_qword <<= shift_width;
3721 
3722 	/* get the current bits from the target bit string */
3723 	dest = dest_ctx + (ce_info->lsb / 8);
3724 
3725 	ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
3726 
3727 	dest_qword &= ~(CPU_TO_LE64(mask));	/* get the bits not changing */
3728 	dest_qword |= CPU_TO_LE64(src_qword);	/* add in the new bits */
3729 
3730 	/* put it all back */
3731 	ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
3732 }
3733 
3734 /**
3735  * ice_set_ctx - set context bits in packed structure
3736  * @src_ctx:  pointer to a generic non-packed context structure
3737  * @dest_ctx: pointer to memory for the packed structure
3738  * @ce_info:  a description of the structure to be transformed
3739  */
3740 enum ice_status
3741 ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3742 {
3743 	int f;
3744 
3745 	for (f = 0; ce_info[f].width; f++) {
3746 		/* We have to deal with each element of the FW response
3747 		 * using the correct size so that we are correct regardless
3748 		 * of the endianness of the machine.
3749 		 */
3750 		switch (ce_info[f].size_of) {
3751 		case sizeof(u8):
3752 			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
3753 			break;
3754 		case sizeof(u16):
3755 			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
3756 			break;
3757 		case sizeof(u32):
3758 			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
3759 			break;
3760 		case sizeof(u64):
3761 			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
3762 			break;
3763 		default:
3764 			return ICE_ERR_INVAL_SIZE;
3765 		}
3766 	}
3767 
3768 	return ICE_SUCCESS;
3769 }
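
/*
 * Example (editorial sketch): driving ice_set_ctx() with a hypothetical
 * two-field context.  ice_example_ctx and its descriptor table are
 * invented for illustration; real tables use the same offset/size_of/
 * width/lsb fields the write helpers above consume, and a zero-width
 * entry terminates the loop.  offsetof() is assumed to be available via
 * the OS-dependent headers.
 */
struct ice_example_ctx {
	u8 head;	/* 5-bit field, packed at bits 0-4 */
	u16 base;	/* 12-bit field, packed at bits 8-19 */
};

static const struct ice_ctx_ele ice_example_ctx_info[] = {
	{ .offset = offsetof(struct ice_example_ctx, head),
	  .size_of = sizeof(u8), .width = 5, .lsb = 0 },
	{ .offset = offsetof(struct ice_example_ctx, base),
	  .size_of = sizeof(u16), .width = 12, .lsb = 8 },
	{ 0 }	/* width == 0 ends the ice_set_ctx() loop */
};

static inline enum ice_status
ice_example_pack_ctx(struct ice_example_ctx *src, u8 *dest, u16 dest_len)
{
	/* the helpers preserve bits outside each field, so start clean */
	ice_memset(dest, 0, dest_len, ICE_NONDMA_MEM);
	return ice_set_ctx((u8 *)src, dest, ice_example_ctx_info);
}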
3770 
3771 /**
3772  * ice_read_byte - read context byte into struct
3773  * @src_ctx:  the context structure to read from
3774  * @dest_ctx: the context to be written to
3775  * @ce_info:  a description of the struct to be filled
3776  */
3777 static void
3778 ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3779 {
3780 	u8 dest_byte, mask;
3781 	u8 *src, *target;
3782 	u16 shift_width;
3783 
3784 	/* prepare the bits and mask */
3785 	shift_width = ce_info->lsb % 8;
3786 	mask = (u8)(BIT(ce_info->width) - 1);
3787 
3788 	/* shift to correct alignment */
3789 	mask <<= shift_width;
3790 
3791 	/* get the current bits from the src bit string */
3792 	src = src_ctx + (ce_info->lsb / 8);
3793 
3794 	ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
3795 
3796 	dest_byte &= mask;	/* keep only this field's bits */
3797 
3798 	dest_byte >>= shift_width;
3799 
3800 	/* get the address from the struct field */
3801 	target = dest_ctx + ce_info->offset;
3802 
3803 	/* put it back in the struct */
3804 	ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
3805 }
3806 
3807 /**
3808  * ice_read_word - read context word into struct
3809  * @src_ctx:  the context structure to read from
3810  * @dest_ctx: the context to be written to
3811  * @ce_info:  a description of the struct to be filled
3812  */
3813 static void
3814 ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3815 {
3816 	u16 dest_word, mask;
3817 	u8 *src, *target;
3818 	__le16 src_word;
3819 	u16 shift_width;
3820 
3821 	/* prepare the bits and mask */
3822 	shift_width = ce_info->lsb % 8;
3823 	mask = BIT(ce_info->width) - 1;
3824 
3825 	/* shift to correct alignment */
3826 	mask <<= shift_width;
3827 
3828 	/* get the current bits from the src bit string */
3829 	src = src_ctx + (ce_info->lsb / 8);
3830 
3831 	ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);
3832 
3833 	/* the data in the memory is stored as little endian so mask it
3834 	 * correctly
3835 	 */
3836 	src_word &= CPU_TO_LE16(mask);
3837 
3838 	/* get the data back into host order before shifting */
3839 	dest_word = LE16_TO_CPU(src_word);
3840 
3841 	dest_word >>= shift_width;
3842 
3843 	/* get the address from the struct field */
3844 	target = dest_ctx + ce_info->offset;
3845 
3846 	/* put it back in the struct */
3847 	ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
3848 }
3849 
3850 /**
3851  * ice_read_dword - read context dword into struct
3852  * @src_ctx:  the context structure to read from
3853  * @dest_ctx: the context to be written to
3854  * @ce_info:  a description of the struct to be filled
3855  */
3856 static void
3857 ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3858 {
3859 	u32 dest_dword, mask;
3860 	__le32 src_dword;
3861 	u8 *src, *target;
3862 	u16 shift_width;
3863 
3864 	/* prepare the bits and mask */
3865 	shift_width = ce_info->lsb % 8;
3866 
3867 	/* if the field width is exactly 32 on an x86 machine, then the shift
3868 	 * operation will not work because the SHL instruction's shift count is
3869 	 * masked to 5 bits, so the shift would do nothing
3870 	 */
3871 	if (ce_info->width < 32)
3872 		mask = BIT(ce_info->width) - 1;
3873 	else
3874 		mask = (u32)~0;
3875 
3876 	/* shift to correct alignment */
3877 	mask <<= shift_width;
3878 
3879 	/* get the current bits from the src bit string */
3880 	src = src_ctx + (ce_info->lsb / 8);
3881 
3882 	ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);
3883 
3884 	/* the data in the memory is stored as little endian so mask it
3885 	 * correctly
3886 	 */
3887 	src_dword &= CPU_TO_LE32(mask);
3888 
3889 	/* get the data back into host order before shifting */
3890 	dest_dword = LE32_TO_CPU(src_dword);
3891 
3892 	dest_dword >>= shift_width;
3893 
3894 	/* get the address from the struct field */
3895 	target = dest_ctx + ce_info->offset;
3896 
3897 	/* put it back in the struct */
3898 	ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
3899 }
3900 
3901 /**
3902  * ice_read_qword - read context qword into struct
3903  * @src_ctx:  the context structure to read from
3904  * @dest_ctx: the context to be written to
3905  * @ce_info:  a description of the struct to be filled
3906  */
3907 static void
3908 ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3909 {
3910 	u64 dest_qword, mask;
3911 	__le64 src_qword;
3912 	u8 *src, *target;
3913 	u16 shift_width;
3914 
3915 	/* prepare the bits and mask */
3916 	shift_width = ce_info->lsb % 8;
3917 
3918 	/* if the field width is exactly 64 on an x86 machine, then the shift
3919 	 * operation will not work because the SHL instruction's shift count is
3920 	 * masked to 6 bits, so the shift would do nothing
3921 	 */
3922 	if (ce_info->width < 64)
3923 		mask = BIT_ULL(ce_info->width) - 1;
3924 	else
3925 		mask = (u64)~0;
3926 
3927 	/* shift to correct alignment */
3928 	mask <<= shift_width;
3929 
3930 	/* get the current bits from the src bit string */
3931 	src = src_ctx + (ce_info->lsb / 8);
3932 
3933 	ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);
3934 
3935 	/* the data in the memory is stored as little endian so mask it
3936 	 * correctly
3937 	 */
3938 	src_qword &= CPU_TO_LE64(mask);
3939 
3940 	/* get the data back into host order before shifting */
3941 	dest_qword = LE64_TO_CPU(src_qword);
3942 
3943 	dest_qword >>= shift_width;
3944 
3945 	/* get the address from the struct field */
3946 	target = dest_ctx + ce_info->offset;
3947 
3948 	/* put it back in the struct */
3949 	ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
3950 }
3951 
3952 /**
3953  * ice_get_ctx - extract context bits from a packed structure
3954  * @src_ctx:  pointer to a generic packed context structure
3955  * @dest_ctx: pointer to a generic non-packed context structure
3956  * @ce_info:  a description of the structure to be read from
3957  */
3958 enum ice_status
3959 ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3960 {
3961 	int f;
3962 
3963 	for (f = 0; ce_info[f].width; f++) {
3964 		switch (ce_info[f].size_of) {
3965 		case sizeof(u8):
3966 			ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
3967 			break;
3968 		case sizeof(u16):
3969 			ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
3970 			break;
3971 		case sizeof(u32):
3972 			ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
3973 			break;
3974 		case sizeof(u64):
3975 			ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
3976 			break;
3977 		default:
3978 			/* nothing to do, just keep going */
3979 			break;
3980 		}
3981 	}
3982 
3983 	return ICE_SUCCESS;
3984 }
3985 
3986 /**
3987  * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
3988  * @hw: pointer to the HW struct
3989  * @vsi_handle: software VSI handle
3990  * @tc: TC number
3991  * @q_handle: software queue handle
3992  */
3993 struct ice_q_ctx *
3994 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
3995 {
3996 	struct ice_vsi_ctx *vsi;
3997 	struct ice_q_ctx *q_ctx;
3998 
3999 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
4000 	if (!vsi)
4001 		return NULL;
4002 	if (q_handle >= vsi->num_lan_q_entries[tc])
4003 		return NULL;
4004 	if (!vsi->lan_q_ctx[tc])
4005 		return NULL;
4006 	q_ctx = vsi->lan_q_ctx[tc];
4007 	return &q_ctx[q_handle];
4008 }
4009 
4010 /**
4011  * ice_ena_vsi_txq
4012  * @pi: port information structure
4013  * @vsi_handle: software VSI handle
4014  * @tc: TC number
4015  * @q_handle: software queue handle
4016  * @num_qgrps: Number of added queue groups
4017  * @buf: list of queue groups to be added
4018  * @buf_size: size of buffer for indirect command
4019  * @cd: pointer to command details structure or NULL
4020  *
4021  * This function adds one LAN queue
4022  */
4023 enum ice_status
4024 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
4025 		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
4026 		struct ice_sq_cd *cd)
4027 {
4028 	struct ice_aqc_txsched_elem_data node = { 0 };
4029 	struct ice_sched_node *parent;
4030 	struct ice_q_ctx *q_ctx;
4031 	enum ice_status status;
4032 	struct ice_hw *hw;
4033 
4034 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4035 		return ICE_ERR_CFG;
4036 
4037 	if (num_qgrps > 1 || buf->num_txqs > 1)
4038 		return ICE_ERR_MAX_LIMIT;
4039 
4040 	hw = pi->hw;
4041 
4042 	if (!ice_is_vsi_valid(hw, vsi_handle))
4043 		return ICE_ERR_PARAM;
4044 
4045 	ice_acquire_lock(&pi->sched_lock);
4046 
4047 	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
4048 	if (!q_ctx) {
4049 		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
4050 			  q_handle);
4051 		status = ICE_ERR_PARAM;
4052 		goto ena_txq_exit;
4053 	}
4054 
4055 	/* find a parent node */
4056 	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4057 					    ICE_SCHED_NODE_OWNER_LAN);
4058 	if (!parent) {
4059 		status = ICE_ERR_PARAM;
4060 		goto ena_txq_exit;
4061 	}
4062 
4063 	buf->parent_teid = parent->info.node_teid;
4064 	node.parent_teid = parent->info.node_teid;
4065 	/* Mark the values in the "generic" section as valid. The default
4066 	 * value in the "generic" section is zero. This means that:
4067 	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
4068 	 * - Priority 0 among siblings, indicated by Bits 1-3.
4069 	 * - WFQ, indicated by Bit 4.
4070 	 * - Adjustment value 0 is used in the PSM credit update flow,
4071 	 *   indicated by Bits 5-6.
4072 	 * - Bit 7 is reserved.
4073 	 * Without marking the generic section as valid in valid_sections, the
4074 	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
4075 	 */
4076 	buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
4077 
4078 	/* add the LAN queue */
4079 	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
4080 	if (status != ICE_SUCCESS) {
4081 		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
4082 			  LE16_TO_CPU(buf->txqs[0].txq_id),
4083 			  hw->adminq.sq_last_status);
4084 		goto ena_txq_exit;
4085 	}
4086 
4087 	node.node_teid = buf->txqs[0].q_teid;
4088 	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
4089 	q_ctx->q_handle = q_handle;
4090 	q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
4091 
4092 	/* add a leaf node into scheduler tree queue layer */
4093 	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
4094 	if (!status)
4095 		status = ice_sched_replay_q_bw(pi, q_ctx);
4096 
4097 ena_txq_exit:
4098 	ice_release_lock(&pi->sched_lock);
4099 	return status;
4100 }
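
/*
 * Example (editorial sketch): enabling a single Tx queue through
 * ice_ena_vsi_txq().  Since the function caps both num_qgrps and
 * buf->num_txqs at 1, a single ice_aqc_add_tx_qgrp is the entire
 * buffer.  A real caller must also pack the Tx queue context into the
 * per-queue entry before this call; that step is elided here.
 */
static inline enum ice_status
ice_example_ena_one_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			u16 q_handle, u16 txq_id)
{
	struct ice_aqc_add_tx_qgrp qg = { 0 };

	qg.num_txqs = 1;
	qg.txqs[0].txq_id = CPU_TO_LE16(txq_id);

	return ice_ena_vsi_txq(pi, vsi_handle, tc, q_handle, 1, &qg,
			       sizeof(qg), NULL);
}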
4101 
4102 /**
4103  * ice_dis_vsi_txq
4104  * @pi: port information structure
4105  * @vsi_handle: software VSI handle
4106  * @tc: TC number
4107  * @num_queues: number of queues
4108  * @q_handles: pointer to software queue handle array
4109  * @q_ids: pointer to the q_id array
4110  * @q_teids: pointer to queue node teids
4111  * @rst_src: if called due to reset, specifies the reset source
4112  * @vmvf_num: the relative VM or VF number that is undergoing the reset
4113  * @cd: pointer to command details structure or NULL
4114  *
4115  * This function removes queues and their corresponding nodes in SW DB
4116  */
4117 enum ice_status
4118 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
4119 		u16 *q_handles, u16 *q_ids, u32 *q_teids,
4120 		enum ice_disq_rst_src rst_src, u16 vmvf_num,
4121 		struct ice_sq_cd *cd)
4122 {
4123 	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
4124 	struct ice_aqc_dis_txq_item qg_list;
4125 	struct ice_q_ctx *q_ctx;
4126 	u16 i;
4127 
4128 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4129 		return ICE_ERR_CFG;
4130 
4131 	if (!num_queues) {
4132 		/* if the queues are already disabled but the disable queue
4133 		 * command still has to be sent to complete the VF reset, call
4134 		 * ice_aq_dis_lan_txq without any queue information
4135 		 */
4136 		if (rst_src)
4137 			return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
4138 						  vmvf_num, NULL);
4139 		return ICE_ERR_CFG;
4140 	}
4141 
4142 	ice_acquire_lock(&pi->sched_lock);
4143 
4144 	for (i = 0; i < num_queues; i++) {
4145 		struct ice_sched_node *node;
4146 
4147 		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
4148 		if (!node)
4149 			continue;
4150 		q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
4151 		if (!q_ctx) {
4152 			ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
4153 				  q_handles[i]);
4154 			continue;
4155 		}
4156 		if (q_ctx->q_handle != q_handles[i]) {
4157 			ice_debug(pi->hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
4158 				  q_ctx->q_handle, q_handles[i]);
4159 			continue;
4160 		}
4161 		qg_list.parent_teid = node->info.parent_teid;
4162 		qg_list.num_qs = 1;
4163 		qg_list.q_id[0] = CPU_TO_LE16(q_ids[i]);
4164 		status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
4165 					    sizeof(qg_list), rst_src, vmvf_num,
4166 					    cd);
4167 
4168 		if (status != ICE_SUCCESS)
4169 			break;
4170 		ice_free_sched_node(pi, node);
4171 		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
4172 	}
4173 	ice_release_lock(&pi->sched_lock);
4174 	return status;
4175 }
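
/*
 * Example (editorial sketch): the matching teardown for the sketch
 * above, disabling one queue outside of any reset flow.  The three
 * parallel arrays are how callers normally hand per-queue state to
 * ice_dis_vsi_txq().
 */
static inline enum ice_status
ice_example_dis_one_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			u16 q_handle, u16 q_id, u32 q_teid)
{
	u16 q_handles[1] = { q_handle };
	u16 q_ids[1] = { q_id };
	u32 q_teids[1] = { q_teid };

	return ice_dis_vsi_txq(pi, vsi_handle, tc, 1, q_handles, q_ids,
			       q_teids, ICE_NO_RESET, 0, NULL);
}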
4176 
4177 /**
4178  * ice_cfg_vsi_qs - configure the new/existing VSI queues
4179  * @pi: port information structure
4180  * @vsi_handle: software VSI handle
4181  * @tc_bitmap: TC bitmap
4182  * @maxqs: max queues array per TC
4183  * @owner: LAN or RDMA
4184  *
4185  * This function adds/updates the VSI queues per TC.
4186  */
4187 static enum ice_status
4188 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
4189 	       u16 *maxqs, u8 owner)
4190 {
4191 	enum ice_status status = ICE_SUCCESS;
4192 	u8 i;
4193 
4194 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4195 		return ICE_ERR_CFG;
4196 
4197 	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4198 		return ICE_ERR_PARAM;
4199 
4200 	ice_acquire_lock(&pi->sched_lock);
4201 
4202 	ice_for_each_traffic_class(i) {
4203 		/* configuration is possible only if TC node is present */
4204 		if (!ice_sched_get_tc_node(pi, i))
4205 			continue;
4206 
4207 		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
4208 					   ice_is_tc_ena(tc_bitmap, i));
4209 		if (status)
4210 			break;
4211 	}
4212 
4213 	ice_release_lock(&pi->sched_lock);
4214 	return status;
4215 }
4216 
4217 /**
4218  * ice_cfg_vsi_lan - configure VSI LAN queues
4219  * @pi: port information structure
4220  * @vsi_handle: software VSI handle
4221  * @tc_bitmap: TC bitmap
4222  * @max_lanqs: max LAN queues array per TC
4223  *
4224  * This function adds/updates the VSI LAN queues per TC.
4225  */
4226 enum ice_status
4227 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
4228 		u16 *max_lanqs)
4229 {
4230 	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
4231 			      ICE_SCHED_NODE_OWNER_LAN);
4232 }
4233 
4234 /**
4235  * ice_replay_pre_init - replay pre initialization
4236  * @hw: pointer to the HW struct
4237  *
4238  * Initializes required config data for VSI, FD, ACL, and RSS before replay.
4239  */
4240 static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
4241 {
4242 	struct ice_switch_info *sw = hw->switch_info;
4243 	u8 i;
4244 
4245 	/* Delete old entries from the replay filter list head, if there are any */
4246 	ice_rm_all_sw_replay_rule_info(hw);
4247 	/* At the start of replay, move entries into the replay_rules list;
4248 	 * this allows rule entries to be added back to the filt_rules list,
4249 	 * which is the operational list.
4250 	 */
4251 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
4252 		LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
4253 				  &sw->recp_list[i].filt_replay_rules);
4254 	ice_sched_replay_agg_vsi_preinit(hw);
4255 
4256 	return ice_sched_replay_tc_node_bw(hw->port_info);
4257 }
4258 
4259 /**
4260  * ice_replay_vsi - replay VSI configuration
4261  * @hw: pointer to the HW struct
4262  * @vsi_handle: driver VSI handle
4263  *
4264  * Restore all VSI configuration after a reset. This function must be
4265  * called with the main VSI first.
4266  */
4267 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
4268 {
4269 	enum ice_status status;
4270 
4271 	if (!ice_is_vsi_valid(hw, vsi_handle))
4272 		return ICE_ERR_PARAM;
4273 
4274 	/* Replay pre-initialization if there is any */
4275 	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
4276 		status = ice_replay_pre_init(hw);
4277 		if (status)
4278 			return status;
4279 	}
4280 	/* Replay per VSI all RSS configurations */
4281 	status = ice_replay_rss_cfg(hw, vsi_handle);
4282 	if (status)
4283 		return status;
4284 	/* Replay per VSI all filters */
4285 	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
4286 	if (!status)
4287 		status = ice_replay_vsi_agg(hw, vsi_handle);
4288 	return status;
4289 }
4290 
4291 /**
4292  * ice_replay_post - post replay configuration cleanup
4293  * @hw: pointer to the HW struct
4294  *
4295  * Post replay cleanup.
4296  */
4297 void ice_replay_post(struct ice_hw *hw)
4298 {
4299 	/* Delete old entries from replay filter list head */
4300 	ice_rm_all_sw_replay_rule_info(hw);
4301 	ice_sched_replay_agg(hw);
4302 }
4303 
4304 /**
4305  * ice_stat_update40 - read 40 bit stat from the chip and update stat values
4306  * @hw: ptr to the hardware info
4307  * @reg: offset of 64 bit HW register to read from
4308  * @prev_stat_loaded: bool to specify if previous stats are loaded
4309  * @prev_stat: ptr to previous loaded stat value
4310  * @cur_stat: ptr to current stat value
4311  */
4312 void
4313 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4314 		  u64 *prev_stat, u64 *cur_stat)
4315 {
4316 	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
4317 
4318 	/* device stats are not reset at PFR, so they likely will not be zeroed
4319 	 * when the driver starts. Thus, save the value from the first read
4320 	 * without adding to the statistic value so that we report stats which
4321 	 * count up from zero.
4322 	 */
4323 	if (!prev_stat_loaded) {
4324 		*prev_stat = new_data;
4325 		return;
4326 	}
4327 
4328 	/* Calculate the difference between the new and old values, and then
4329 	 * add it to the software stat value.
4330 	 */
4331 	if (new_data >= *prev_stat)
4332 		*cur_stat += new_data - *prev_stat;
4333 	else
4334 		/* to manage the potential roll-over */
4335 		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
4336 
4337 	/* Update the previously stored value to prepare for next read */
4338 	*prev_stat = new_data;
4339 }
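
/*
 * Example (editorial sketch): the rollover branch above with concrete
 * numbers.  With prev = 2^40 - 16 and a post-wrap reading of 16, a raw
 * subtraction would underflow; re-adding the 2^40 modulus first yields
 * the true delta of 32.
 */
static inline u64
ice_example_stat_delta40(u64 prev, u64 new_data)
{
	if (new_data >= prev)
		return new_data - prev;

	/* the 40-bit counter wrapped; re-add the modulus */
	return (new_data + BIT_ULL(40)) - prev;
}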
4340 
4341 /**
4342  * ice_stat_update32 - read 32 bit stat from the chip and update stat values
4343  * @hw: ptr to the hardware info
4344  * @reg: offset of HW register to read from
4345  * @prev_stat_loaded: bool to specify if previous stats are loaded
4346  * @prev_stat: ptr to previous loaded stat value
4347  * @cur_stat: ptr to current stat value
4348  */
4349 void
4350 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4351 		  u64 *prev_stat, u64 *cur_stat)
4352 {
4353 	u32 new_data;
4354 
4355 	new_data = rd32(hw, reg);
4356 
4357 	/* device stats are not reset at PFR, so they likely will not be zeroed
4358 	 * when the driver starts. Thus, save the value from the first read
4359 	 * without adding to the statistic value so that we report stats which
4360 	 * count up from zero.
4361 	 */
4362 	if (!prev_stat_loaded) {
4363 		*prev_stat = new_data;
4364 		return;
4365 	}
4366 
4367 	/* Calculate the difference between the new and old values, and then
4368 	 * add it to the software stat value.
4369 	 */
4370 	if (new_data >= *prev_stat)
4371 		*cur_stat += new_data - *prev_stat;
4372 	else
4373 		/* to manage the potential roll-over */
4374 		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
4375 
4376 	/* Update the previously stored value to prepare for next read */
4377 	*prev_stat = new_data;
4378 }
4379 
4380 /**
4381  * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
4382  * @hw: ptr to the hardware info
4383  * @vsi_handle: VSI handle
4384  * @prev_stat_loaded: bool to specify if the previous stat values are loaded
4385  * @cur_stats: ptr to current stats structure
4386  *
4387  * The GLV_REPC statistic register actually tracks two 16-bit statistics, and
4388  * thus cannot be read using the normal ice_stat_update32 function.
4389  *
4390  * Read the GLV_REPC register associated with the given VSI, and update the
4391  * rx_no_desc and rx_error values in the ice_eth_stats structure.
4392  *
4393  * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
4394  * cleared each time it's read.
4395  *
4396  * Note that the GLV_RDPC register also counts the causes that would trigger
4397  * GLV_REPC. However, it does not give the finer grained detail about why the
4398  * packets are being dropped. The GLV_REPC values can be used to distinguish
4399  * whether Rx packets are dropped due to errors or due to no available
4400  * descriptors.
4401  */
4402 void
4403 ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
4404 		     struct ice_eth_stats *cur_stats)
4405 {
4406 	u16 vsi_num, no_desc, error_cnt;
4407 	u32 repc;
4408 
4409 	if (!ice_is_vsi_valid(hw, vsi_handle))
4410 		return;
4411 
4412 	vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
4413 
4414 	/* If we haven't loaded stats yet, just clear the current value */
4415 	if (!prev_stat_loaded) {
4416 		wr32(hw, GLV_REPC(vsi_num), 0);
4417 		return;
4418 	}
4419 
4420 	repc = rd32(hw, GLV_REPC(vsi_num));
4421 	no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
4422 	error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;
4423 
4424 	/* Clear the count by writing to the stats register */
4425 	wr32(hw, GLV_REPC(vsi_num), 0);
4426 
4427 	cur_stats->rx_no_desc += no_desc;
4428 	cur_stats->rx_errors += error_cnt;
4429 }
4430 
4431 /**
4432  * ice_aq_alternate_write
4433  * @hw: pointer to the hardware structure
4434  * @reg_addr0: address of first dword to be written
4435  * @reg_val0: value to be written under 'reg_addr0'
4436  * @reg_addr1: address of second dword to be written
4437  * @reg_val1: value to be written under 'reg_addr1'
4438  *
4439  * Write one or two dwords to the alternate structure. Fields are indicated
4440  * by the 'reg_addr0' and 'reg_addr1' register numbers.
4441  */
4442 enum ice_status
4443 ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
4444 		       u32 reg_addr1, u32 reg_val1)
4445 {
4446 	struct ice_aqc_read_write_alt_direct *cmd;
4447 	struct ice_aq_desc desc;
4448 	enum ice_status status;
4449 
4450 	cmd = &desc.params.read_write_alt_direct;
4451 
4452 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_alt_direct);
4453 	cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
4454 	cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
4455 	cmd->dword0_value = CPU_TO_LE32(reg_val0);
4456 	cmd->dword1_value = CPU_TO_LE32(reg_val1);
4457 
4458 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4459 
4460 	return status;
4461 }
4462 
4463 /**
4464  * ice_aq_alternate_read
4465  * @hw: pointer to the hardware structure
4466  * @reg_addr0: address of first dword to be read
4467  * @reg_val0: pointer for data read from 'reg_addr0'
4468  * @reg_addr1: address of second dword to be read
4469  * @reg_val1: pointer for data read from 'reg_addr1'
4470  *
4471  * Read one or two dwords from the alternate structure. Fields are indicated
4472  * by the 'reg_addr0' and 'reg_addr1' register numbers. If the 'reg_val1'
4473  * pointer is not passed, only the register at 'reg_addr0' is read.
4474  */
4475 enum ice_status
4476 ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
4477 		      u32 reg_addr1, u32 *reg_val1)
4478 {
4479 	struct ice_aqc_read_write_alt_direct *cmd;
4480 	struct ice_aq_desc desc;
4481 	enum ice_status status;
4482 
4483 	cmd = &desc.params.read_write_alt_direct;
4484 
4485 	if (!reg_val0)
4486 		return ICE_ERR_PARAM;
4487 
4488 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_alt_direct);
4489 	cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
4490 	cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
4491 
4492 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4493 
4494 	if (status == ICE_SUCCESS) {
4495 		*reg_val0 = LE32_TO_CPU(cmd->dword0_value);
4496 
4497 		if (reg_val1)
4498 			*reg_val1 = LE32_TO_CPU(cmd->dword1_value);
4499 	}
4500 
4501 	return status;
4502 }
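
/*
 * Example (editorial sketch): pairing the two wrappers above for a
 * write/read round trip through the alternate structure.  The values
 * written are placeholders, and addr0/addr1 stand in for real
 * alternate-RAM dword addresses.
 */
static inline enum ice_status
ice_example_alt_roundtrip(struct ice_hw *hw, u32 addr0, u32 addr1)
{
	u32 val0 = 0, val1 = 0;
	enum ice_status status;

	status = ice_aq_alternate_write(hw, addr0, 0x12345678,
					addr1, 0x9abcdef0);
	if (status)
		return status;

	/* both dwords come back in a single AQ command */
	return ice_aq_alternate_read(hw, addr0, &val0, addr1, &val1);
}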
4503 
4504 /**
4505  * ice_aq_alternate_write_done
4506  * @hw: pointer to the HW structure
4507  * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
4508  * @reset_needed: indicates the SW should trigger GLOBAL reset
4509  *
4510  * Indicates to the FW that alternate structures have been changed.
4511  */
4512 enum ice_status
4513 ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode, bool *reset_needed)
4514 {
4515 	struct ice_aqc_done_alt_write *cmd;
4516 	struct ice_aq_desc desc;
4517 	enum ice_status status;
4518 
4519 	cmd = &desc.params.done_alt_write;
4520 
4521 	if (!reset_needed)
4522 		return ICE_ERR_PARAM;
4523 
4524 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_done_alt_write);
4525 	cmd->flags = bios_mode;
4526 
4527 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4528 	if (!status)
4529 		*reset_needed = (LE16_TO_CPU(cmd->flags) &
4530 				 ICE_AQC_RESP_RESET_NEEDED) != 0;
4531 
4532 	return status;
4533 }
4534 
4535 /**
4536  * ice_aq_alternate_clear
4537  * @hw: pointer to the HW structure
4538  *
4539  * Clear the alternate structures of the port from which the function
4540  * is called.
4541  */
4542 enum ice_status ice_aq_alternate_clear(struct ice_hw *hw)
4543 {
4544 	struct ice_aq_desc desc;
4545 	enum ice_status status;
4546 
4547 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_port_alt_write);
4548 
4549 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4550 
4551 	return status;
4552 }
4553 
4554 /**
4555  * ice_sched_query_elem - query element information from HW
4556  * @hw: pointer to the HW struct
4557  * @node_teid: node TEID to be queried
4558  * @buf: buffer to element information
4559  *
4560  * This function queries HW element information
4561  */
4562 enum ice_status
4563 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
4564 		     struct ice_aqc_get_elem *buf)
4565 {
4566 	u16 buf_size, num_elem_ret = 0;
4567 	enum ice_status status;
4568 
4569 	buf_size = sizeof(*buf);
4570 	ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
4571 	buf->generic[0].node_teid = CPU_TO_LE32(node_teid);
4572 	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
4573 					  NULL);
4574 	if (status != ICE_SUCCESS || num_elem_ret != 1)
4575 		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
4576 	return status;
4577 }
4578 
4579 /**
4580  * ice_get_fw_mode - returns FW mode
4581  * @hw: pointer to the HW struct
4582  */
4583 enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
4584 {
4585 #define ICE_FW_MODE_DBG_M BIT(0)
4586 #define ICE_FW_MODE_REC_M BIT(1)
4587 #define ICE_FW_MODE_ROLLBACK_M BIT(2)
4588 	u32 fw_mode;
4589 
4590 	/* check the current FW mode */
4591 	fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;
4592 
4593 	if (fw_mode & ICE_FW_MODE_DBG_M)
4594 		return ICE_FW_MODE_DBG;
4595 	else if (fw_mode & ICE_FW_MODE_REC_M)
4596 		return ICE_FW_MODE_REC;
4597 	else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
4598 		return ICE_FW_MODE_ROLLBACK;
4599 	else
4600 		return ICE_FW_MODE_NORMAL;
4601 }
4602 
4603 /**
4604  * ice_get_cur_lldp_persist_status
4605  * @hw: pointer to the HW struct
4606  * @lldp_status: return value of LLDP persistent status
4607  *
4608  * Get the current LLDP persistent status
4609  */
4610 enum ice_status
4611 ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
4612 {
4613 	struct ice_port_info *pi = hw->port_info;
4614 	enum ice_status ret;
4615 	__le32 raw_data;
4616 	u32 data, mask;
4617 
4618 	if (!lldp_status)
4619 		return ICE_ERR_BAD_PTR;
4620 
4621 	ret = ice_acquire_nvm(hw, ICE_RES_READ);
4622 	if (ret)
4623 		return ret;
4624 
4625 	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LLDP_PRESERVED_MOD_ID,
4626 			      ICE_AQC_NVM_CUR_LLDP_PERSIST_RD_OFFSET,
4627 			      ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data,
4628 			      false, true, NULL);
4629 	if (!ret) {
4630 		data = LE32_TO_CPU(raw_data);
4631 		mask = ICE_AQC_NVM_LLDP_STATUS_M <<
4632 			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
4633 		data = data & mask;
4634 		*lldp_status = data >>
4635 			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
4636 	}
4637 
4638 	ice_release_nvm(hw);
4639 
4640 	return ret;
4641 }
4642 
4643 /**
4644  * ice_get_dflt_lldp_persist_status
4645  * @hw: pointer to the HW struct
4646  * @lldp_status: return value of LLDP persistent status
4647  *
4648  * Get the default LLDP persistent status
4649  */
4650 enum ice_status
4651 ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
4652 {
4653 	struct ice_port_info *pi = hw->port_info;
4654 	u32 data, mask, loc_data, loc_data_tmp;
4655 	enum ice_status ret;
4656 	__le16 loc_raw_data;
4657 	__le32 raw_data;
4658 
4659 	if (!lldp_status)
4660 		return ICE_ERR_BAD_PTR;
4661 
4662 	ret = ice_acquire_nvm(hw, ICE_RES_READ);
4663 	if (ret)
4664 		return ret;
4665 
4666 	/* Read the offset of EMP_SR_PTR */
4667 	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
4668 			      ICE_AQC_NVM_EMP_SR_PTR_OFFSET,
4669 			      ICE_AQC_NVM_EMP_SR_PTR_RD_LEN,
4670 			      &loc_raw_data, false, true, NULL);
4671 	if (ret)
4672 		goto exit;
4673 
4674 	loc_data = LE16_TO_CPU(loc_raw_data);
4675 	if (loc_data & ICE_AQC_NVM_EMP_SR_PTR_TYPE_M) {
4676 		loc_data &= ICE_AQC_NVM_EMP_SR_PTR_M;
4677 		loc_data *= ICE_AQC_NVM_SECTOR_UNIT;
4678 	} else {
4679 		loc_data *= ICE_AQC_NVM_WORD_UNIT;
4680 	}
4681 
4682 	/* Read the offset of LLDP configuration pointer */
4683 	loc_data += ICE_AQC_NVM_LLDP_CFG_PTR_OFFSET;
4684 	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
4685 			      ICE_AQC_NVM_LLDP_CFG_PTR_RD_LEN, &loc_raw_data,
4686 			      false, true, NULL);
4687 	if (ret)
4688 		goto exit;
4689 
4690 	loc_data_tmp = LE16_TO_CPU(loc_raw_data);
4691 	loc_data_tmp *= ICE_AQC_NVM_WORD_UNIT;
4692 	loc_data += loc_data_tmp;
4693 
4694 	/* Skip the LLDP configuration section length (2 bytes) */
4695 	loc_data += ICE_AQC_NVM_LLDP_CFG_HEADER_LEN;
4696 
4697 	/* Read the default LLDP configuration */
4698 	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
4699 			      ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data, false,
4700 			      true, NULL);
4701 	if (!ret) {
4702 		data = LE32_TO_CPU(raw_data);
4703 		mask = ICE_AQC_NVM_LLDP_STATUS_M <<
4704 			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
4705 		data = data & mask;
4706 		*lldp_status = data >>
4707 			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
4708 	}
4709 
4710 exit:
4711 	ice_release_nvm(hw);
4712 
4713 	return ret;
4714 }
4715 
4716 /**
4717  * ice_get_netlist_ver_info
4718  * @hw: pointer to the HW struct
4719  *
4720  * Get the netlist version information
4721  */
4722 enum ice_status
4723 ice_get_netlist_ver_info(struct ice_hw *hw)
4724 {
4725 	struct ice_netlist_ver_info *ver = &hw->netlist_ver;
4726 	enum ice_status ret;
4727 	u32 id_blk_start;
4728 	__le16 raw_data;
4729 	u16 data, i;
4730 	u16 *buff;
4731 
4732 	ret = ice_acquire_nvm(hw, ICE_RES_READ);
4733 	if (ret)
4734 		return ret;
4735 	buff = (u16 *)ice_calloc(hw, ICE_AQC_NVM_NETLIST_ID_BLK_LEN,
4736 				 sizeof(*buff));
4737 	if (!buff) {
4738 		ret = ICE_ERR_NO_MEMORY;
4739 		goto exit_no_mem;
4740 	}
4741 
4742 	/* read module length */
4743 	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
4744 			      ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET * 2,
4745 			      ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN, &raw_data,
4746 			      false, false, NULL);
4747 	if (ret)
4748 		goto exit_error;
4749 
4750 	data = LE16_TO_CPU(raw_data);
4751 	/* exit if the length is 0 */
4752 	if (!data)
4753 		goto exit_error;
4754 
4755 	/* read node count */
4756 	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
4757 			      ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET * 2,
4758 			      ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN, &raw_data,
4759 			      false, false, NULL);
4760 	if (ret)
4761 		goto exit_error;
4762 	data = LE16_TO_CPU(raw_data);
4763 
4764 	/* netlist ID block starts from offset 4 + node count * 2 */
4765 	id_blk_start = ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET + data * 2;
4766 
4767 	/* read the entire netlist ID block */
4768 	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
4769 			      id_blk_start * 2,
4770 			      ICE_AQC_NVM_NETLIST_ID_BLK_LEN * 2, buff, false,
4771 			      false, NULL);
4772 	if (ret)
4773 		goto exit_error;
4774 
4775 	for (i = 0; i < ICE_AQC_NVM_NETLIST_ID_BLK_LEN; i++)
4776 		buff[i] = LE16_TO_CPU(((_FORCE_ __le16 *)buff)[i]);
4777 
4778 	ver->major = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16) |
4779 		buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW];
4780 	ver->minor = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16) |
4781 		buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW];
4782 	ver->type = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH] << 16) |
4783 		buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW];
4784 	ver->rev = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH] << 16) |
4785 		buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW];
4786 	ver->cust_ver = buff[ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER];
4787 	/* Read the left-most 4 bytes of the SHA hash */
4788 	ver->hash = buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 15] << 16 |
4789 		buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 14];
4790 
4791 exit_error:
4792 	ice_free(hw, buff);
4793 exit_no_mem:
4794 	ice_release_nvm(hw);
4795 	return ret;
4796 }
4797 
4798 /**
4799  * ice_fw_supports_link_override
4800  * @hw: pointer to the hardware structure
4801  *
4802  * Checks if the firmware supports link override
4803  */
4804 bool ice_fw_supports_link_override(struct ice_hw *hw)
4805 {
4806 	if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
4807 		if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
4808 			return true;
4809 		if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
4810 		    hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
4811 			return true;
4812 	} else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
4813 		return true;
4814 	}
4815 
4816 	return false;
4817 }
4818 
4819 /**
4820  * ice_get_link_default_override
4821  * @ldo: pointer to the link default override struct
4822  * @pi: pointer to the port info struct
4823  *
4824  * Gets the link default override for a port
4825  */
4826 enum ice_status
4827 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
4828 			      struct ice_port_info *pi)
4829 {
4830 	u16 i, tlv, tlv_len, tlv_start, buf, offset;
4831 	struct ice_hw *hw = pi->hw;
4832 	enum ice_status status;
4833 
4834 	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
4835 					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
4836 	if (status) {
4837 		ice_debug(hw, ICE_DBG_INIT,
4838 			  "Failed to read link override TLV.\n");
4839 		return status;
4840 	}
4841 
4842 	/* Each port has its own config; calculate for our port */
4843 	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
4844 		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
4845 
4846 	/* link options first */
4847 	status = ice_read_sr_word(hw, tlv_start, &buf);
4848 	if (status) {
4849 		ice_debug(hw, ICE_DBG_INIT,
4850 			  "Failed to read override link options.\n");
4851 		return status;
4852 	}
4853 	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
4854 	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
4855 		ICE_LINK_OVERRIDE_PHY_CFG_S;
4856 
4857 	/* link PHY config */
4858 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
4859 	status = ice_read_sr_word(hw, offset, &buf);
4860 	if (status) {
4861 		ice_debug(hw, ICE_DBG_INIT,
4862 			  "Failed to read override phy config.\n");
4863 		return status;
4864 	}
4865 	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
4866 
4867 	/* PHY types low */
4868 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
4869 	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
4870 		status = ice_read_sr_word(hw, (offset + i), &buf);
4871 		if (status) {
4872 			ice_debug(hw, ICE_DBG_INIT,
4873 				  "Failed to read override link options.\n");
4874 			return status;
4875 		}
4876 		/* shift 16 bits at a time to fill 64 bits */
4877 		ldo->phy_type_low |= ((u64)buf << (i * 16));
4878 	}
4879 
4880 	/* PHY types high */
4881 	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
4882 		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
4883 	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
4884 		status = ice_read_sr_word(hw, (offset + i), &buf);
4885 		if (status) {
4886 			ice_debug(hw, ICE_DBG_INIT,
4887 				  "Failed to read override link options.\n");
4888 			return status;
4889 		}
4890 		/* shift 16 bits at a time to fill 64 bits */
4891 		ldo->phy_type_high |= ((u64)buf << (i * 16));
4892 	}
4893 
4894 	return status;
4895 }
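
/*
 * Example (editorial sketch): the 16-bits-at-a-time assembly used in
 * the two loops above, in isolation.  Shadow-RAM words are read least
 * significant first and shifted into place to build one 64-bit PHY
 * type mask.
 */
static inline u64
ice_example_words_to_u64(const u16 words[ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS])
{
	u64 val = 0;
	u16 i;

	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++)
		val |= (u64)words[i] << (i * 16);

	return val;
}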
4896