1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright (c) 2024, Intel Corporation
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the Intel Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include "ice_common.h"
33 #include "ice_sched.h"
34 #include "ice_adminq_cmd.h"
35
36 #include "ice_flow.h"
37 #include "ice_switch.h"
38
39 #define ICE_PF_RESET_WAIT_COUNT 500
40
41 static const char * const ice_link_mode_str_low[] = {
42 ice_arr_elem_idx(0, "100BASE_TX"),
43 ice_arr_elem_idx(1, "100M_SGMII"),
44 ice_arr_elem_idx(2, "1000BASE_T"),
45 ice_arr_elem_idx(3, "1000BASE_SX"),
46 ice_arr_elem_idx(4, "1000BASE_LX"),
47 ice_arr_elem_idx(5, "1000BASE_KX"),
48 ice_arr_elem_idx(6, "1G_SGMII"),
49 ice_arr_elem_idx(7, "2500BASE_T"),
50 ice_arr_elem_idx(8, "2500BASE_X"),
51 ice_arr_elem_idx(9, "2500BASE_KX"),
52 ice_arr_elem_idx(10, "5GBASE_T"),
53 ice_arr_elem_idx(11, "5GBASE_KR"),
54 ice_arr_elem_idx(12, "10GBASE_T"),
55 ice_arr_elem_idx(13, "10G_SFI_DA"),
56 ice_arr_elem_idx(14, "10GBASE_SR"),
57 ice_arr_elem_idx(15, "10GBASE_LR"),
58 ice_arr_elem_idx(16, "10GBASE_KR_CR1"),
59 ice_arr_elem_idx(17, "10G_SFI_AOC_ACC"),
60 ice_arr_elem_idx(18, "10G_SFI_C2C"),
61 ice_arr_elem_idx(19, "25GBASE_T"),
62 ice_arr_elem_idx(20, "25GBASE_CR"),
63 ice_arr_elem_idx(21, "25GBASE_CR_S"),
64 ice_arr_elem_idx(22, "25GBASE_CR1"),
65 ice_arr_elem_idx(23, "25GBASE_SR"),
66 ice_arr_elem_idx(24, "25GBASE_LR"),
67 ice_arr_elem_idx(25, "25GBASE_KR"),
68 ice_arr_elem_idx(26, "25GBASE_KR_S"),
69 ice_arr_elem_idx(27, "25GBASE_KR1"),
70 ice_arr_elem_idx(28, "25G_AUI_AOC_ACC"),
71 ice_arr_elem_idx(29, "25G_AUI_C2C"),
72 ice_arr_elem_idx(30, "40GBASE_CR4"),
73 ice_arr_elem_idx(31, "40GBASE_SR4"),
74 ice_arr_elem_idx(32, "40GBASE_LR4"),
75 ice_arr_elem_idx(33, "40GBASE_KR4"),
76 ice_arr_elem_idx(34, "40G_XLAUI_AOC_ACC"),
77 ice_arr_elem_idx(35, "40G_XLAUI"),
78 ice_arr_elem_idx(36, "50GBASE_CR2"),
79 ice_arr_elem_idx(37, "50GBASE_SR2"),
80 ice_arr_elem_idx(38, "50GBASE_LR2"),
81 ice_arr_elem_idx(39, "50GBASE_KR2"),
82 ice_arr_elem_idx(40, "50G_LAUI2_AOC_ACC"),
83 ice_arr_elem_idx(41, "50G_LAUI2"),
84 ice_arr_elem_idx(42, "50G_AUI2_AOC_ACC"),
85 ice_arr_elem_idx(43, "50G_AUI2"),
86 ice_arr_elem_idx(44, "50GBASE_CP"),
87 ice_arr_elem_idx(45, "50GBASE_SR"),
88 ice_arr_elem_idx(46, "50GBASE_FR"),
89 ice_arr_elem_idx(47, "50GBASE_LR"),
90 ice_arr_elem_idx(48, "50GBASE_KR_PAM4"),
91 ice_arr_elem_idx(49, "50G_AUI1_AOC_ACC"),
92 ice_arr_elem_idx(50, "50G_AUI1"),
93 ice_arr_elem_idx(51, "100GBASE_CR4"),
94 ice_arr_elem_idx(52, "100GBASE_SR4"),
95 ice_arr_elem_idx(53, "100GBASE_LR4"),
96 ice_arr_elem_idx(54, "100GBASE_KR4"),
97 ice_arr_elem_idx(55, "100G_CAUI4_AOC_ACC"),
98 ice_arr_elem_idx(56, "100G_CAUI4"),
99 ice_arr_elem_idx(57, "100G_AUI4_AOC_ACC"),
100 ice_arr_elem_idx(58, "100G_AUI4"),
101 ice_arr_elem_idx(59, "100GBASE_CR_PAM4"),
102 ice_arr_elem_idx(60, "100GBASE_KR_PAM4"),
103 ice_arr_elem_idx(61, "100GBASE_CP2"),
104 ice_arr_elem_idx(62, "100GBASE_SR2"),
105 ice_arr_elem_idx(63, "100GBASE_DR"),
106 };
107
108 static const char * const ice_link_mode_str_high[] = {
109 ice_arr_elem_idx(0, "100GBASE_KR2_PAM4"),
110 ice_arr_elem_idx(1, "100G_CAUI2_AOC_ACC"),
111 ice_arr_elem_idx(2, "100G_CAUI2"),
112 ice_arr_elem_idx(3, "100G_AUI2_AOC_ACC"),
113 ice_arr_elem_idx(4, "100G_AUI2"),
114 };
115
116 /**
117 * ice_dump_phy_type - helper function to dump phy_type
118 * @hw: pointer to the HW structure
119 * @low: 64 bit value for phy_type_low
120 * @high: 64 bit value for phy_type_high
121 * @prefix: prefix string to differentiate multiple dumps
122 */
123 static void
124 ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
125 {
126 u32 i;
127
128 ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix,
129 (unsigned long long)low);
130
131 for (i = 0; i < ARRAY_SIZE(ice_link_mode_str_low); i++) {
132 if (low & BIT_ULL(i))
133 ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
134 prefix, i, ice_link_mode_str_low[i]);
135 }
136
137 ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix,
138 (unsigned long long)high);
139
140 for (i = 0; i < ARRAY_SIZE(ice_link_mode_str_high); i++) {
141 if (high & BIT_ULL(i))
142 ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
143 prefix, i, ice_link_mode_str_high[i]);
144 }
145 }
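
/* Example (illustrative only): a phy_type_low value of BIT_ULL(12) would be
 * reported by ice_dump_phy_type() as "bit(12): 10GBASE_T", since index 12 of
 * ice_link_mode_str_low[] holds that string; phy_type_high bits are decoded
 * the same way against ice_link_mode_str_high[].
 */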
146
147 /**
148 * ice_set_mac_type - Sets MAC type
149 * @hw: pointer to the HW structure
150 *
151 * This function sets the MAC type of the adapter based on the
152 * vendor ID and device ID stored in the HW structure.
153 */
154 enum ice_status ice_set_mac_type(struct ice_hw *hw)
155 {
156 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
157
158 if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
159 return ICE_ERR_DEVICE_NOT_SUPPORTED;
160
161 switch (hw->device_id) {
162 case ICE_DEV_ID_E810C_BACKPLANE:
163 case ICE_DEV_ID_E810C_QSFP:
164 case ICE_DEV_ID_E810C_SFP:
165 case ICE_DEV_ID_E810_XXV_BACKPLANE:
166 case ICE_DEV_ID_E810_XXV_QSFP:
167 case ICE_DEV_ID_E810_XXV_SFP:
168 hw->mac_type = ICE_MAC_E810;
169 break;
170 case ICE_DEV_ID_E822C_10G_BASE_T:
171 case ICE_DEV_ID_E822C_BACKPLANE:
172 case ICE_DEV_ID_E822C_QSFP:
173 case ICE_DEV_ID_E822C_SFP:
174 case ICE_DEV_ID_E822C_SGMII:
175 case ICE_DEV_ID_E822L_10G_BASE_T:
176 case ICE_DEV_ID_E822L_BACKPLANE:
177 case ICE_DEV_ID_E822L_SFP:
178 case ICE_DEV_ID_E822L_SGMII:
179 case ICE_DEV_ID_E823L_10G_BASE_T:
180 case ICE_DEV_ID_E823L_1GBE:
181 case ICE_DEV_ID_E823L_BACKPLANE:
182 case ICE_DEV_ID_E823L_QSFP:
183 case ICE_DEV_ID_E823L_SFP:
184 case ICE_DEV_ID_E823C_10G_BASE_T:
185 case ICE_DEV_ID_E823C_BACKPLANE:
186 case ICE_DEV_ID_E823C_QSFP:
187 case ICE_DEV_ID_E823C_SFP:
188 case ICE_DEV_ID_E823C_SGMII:
189 hw->mac_type = ICE_MAC_GENERIC;
190 break;
191 default:
192 hw->mac_type = ICE_MAC_UNKNOWN;
193 break;
194 }
195
196 ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
197 return ICE_SUCCESS;
198 }
199
200 /**
201 * ice_is_e810
202 * @hw: pointer to the hardware structure
203 *
204 * returns true if the device is E810 based, false if not.
205 */
206 bool ice_is_e810(struct ice_hw *hw)
207 {
208 return hw->mac_type == ICE_MAC_E810;
209 }
210
211 /**
212 * ice_is_e810t
213 * @hw: pointer to the hardware structure
214 *
215 * returns true if the device is E810T based, false if not.
216 */
217 bool ice_is_e810t(struct ice_hw *hw)
218 {
219 switch (hw->device_id) {
220 case ICE_DEV_ID_E810C_SFP:
221 switch (hw->subsystem_device_id) {
222 case ICE_SUBDEV_ID_E810T:
223 case ICE_SUBDEV_ID_E810T2:
224 case ICE_SUBDEV_ID_E810T3:
225 case ICE_SUBDEV_ID_E810T4:
226 case ICE_SUBDEV_ID_E810T5:
227 case ICE_SUBDEV_ID_E810T7:
228 return true;
229 }
230 break;
231 case ICE_DEV_ID_E810C_QSFP:
232 switch (hw->subsystem_device_id) {
233 case ICE_SUBDEV_ID_E810T2:
234 case ICE_SUBDEV_ID_E810T5:
235 case ICE_SUBDEV_ID_E810T6:
236 return true;
237 }
238 break;
239 default:
240 break;
241 }
242
243 return false;
244 }
245
246 /**
247 * ice_is_e823
248 * @hw: pointer to the hardware structure
249 *
250 * returns true if the device is E823-L or E823-C based, false if not.
251 */
252 bool ice_is_e823(struct ice_hw *hw)
253 {
254 switch (hw->device_id) {
255 case ICE_DEV_ID_E823L_BACKPLANE:
256 case ICE_DEV_ID_E823L_SFP:
257 case ICE_DEV_ID_E823L_10G_BASE_T:
258 case ICE_DEV_ID_E823L_1GBE:
259 case ICE_DEV_ID_E823L_QSFP:
260 case ICE_DEV_ID_E823C_BACKPLANE:
261 case ICE_DEV_ID_E823C_QSFP:
262 case ICE_DEV_ID_E823C_SFP:
263 case ICE_DEV_ID_E823C_10G_BASE_T:
264 case ICE_DEV_ID_E823C_SGMII:
265 return true;
266 default:
267 return false;
268 }
269 }
270
271 /**
272 * ice_clear_pf_cfg - Clear PF configuration
273 * @hw: pointer to the hardware structure
274 *
275 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
276 * configuration, flow director filters, etc.).
277 */
278 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
279 {
280 struct ice_aq_desc desc;
281
282 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
283
284 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
285 }
286
287 /**
288 * ice_aq_manage_mac_read - manage MAC address read command
289 * @hw: pointer to the HW struct
290 * @buf: a virtual buffer to hold the manage MAC read response
291 * @buf_size: Size of the virtual buffer
292 * @cd: pointer to command details structure or NULL
293 *
294 * This function is used to return the per-PF station MAC address (0x0107).
295 * NOTE: Upon successful completion of this command, MAC address information
296 * is returned in the user-specified buffer. Interpret the buffer as a
297 * "manage_mac_read" response.
298 * Responses such as the various MAC addresses are stored in the HW struct
299 * (port.mac). ice_discover_dev_caps is expected to be called before this
300 * function is called.
301 */
302 enum ice_status
303 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
304 struct ice_sq_cd *cd)
305 {
306 struct ice_aqc_manage_mac_read_resp *resp;
307 struct ice_aqc_manage_mac_read *cmd;
308 struct ice_aq_desc desc;
309 enum ice_status status;
310 u16 flags;
311 u8 i;
312
313 cmd = &desc.params.mac_read;
314
315 if (buf_size < sizeof(*resp))
316 return ICE_ERR_BUF_TOO_SHORT;
317
318 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
319
320 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
321 if (status)
322 return status;
323
324 resp = (struct ice_aqc_manage_mac_read_resp *)buf;
325 flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
326
327 if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
328 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
329 return ICE_ERR_CFG;
330 }
331
332 /* A single port can report up to two (LAN and WoL) addresses */
333 for (i = 0; i < cmd->num_addr; i++)
334 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
335 ice_memcpy(hw->port_info->mac.lan_addr,
336 resp[i].mac_addr, ETH_ALEN,
337 ICE_NONDMA_TO_NONDMA);
338 ice_memcpy(hw->port_info->mac.perm_addr,
339 resp[i].mac_addr,
340 ETH_ALEN, ICE_NONDMA_TO_NONDMA);
341 break;
342 }
343 return ICE_SUCCESS;
344 }
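
/* Usage sketch (illustrative, mirroring what ice_init_hw() does below): a
 * caller allocates room for the two possible (LAN and WoL) responses and
 * passes that buffer in:
 *
 *	u16 len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
 *	void *buf = ice_calloc(hw, 2,
 *			       sizeof(struct ice_aqc_manage_mac_read_resp));
 *
 *	if (buf) {
 *		status = ice_aq_manage_mac_read(hw, buf, len, NULL);
 *		ice_free(hw, buf);
 *	}
 */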
345
346 /**
347 * ice_phy_maps_to_media
348 * @phy_type_low: PHY type low bits
349 * @phy_type_high: PHY type high bits
350 * @media_mask_low: media type PHY type low bitmask
351 * @media_mask_high: media type PHY type high bitmask
352 *
353 * Return true if the PHY type [low|high] bits contain only bits that are set
354 * in the media type PHY type [low|high] bitmask.
355 */
356 static bool
357 ice_phy_maps_to_media(u64 phy_type_low, u64 phy_type_high,
358 u64 media_mask_low, u64 media_mask_high)
359 {
360 /* check if a PHY type exists for the media type */
361 if (!(phy_type_low & media_mask_low ||
362 phy_type_high & media_mask_high))
363 return false;
364
365 /* check that PHY types are only of media type */
366 if (!(phy_type_low & ~media_mask_low) &&
367 !(phy_type_high & ~media_mask_high))
368 return true;
369
370 return false;
371 }
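
/* Worked example (illustrative): if phy_type_low sets only the 1000BASE_T and
 * 10GBASE_T bits and media_mask_low is the BASE-T mask, both checks pass and
 * the function returns true; if phy_type_low additionally sets a bit outside
 * the mask (e.g. a DAC PHY type), the second check fails and it returns false.
 */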
372
373 /**
374 * ice_set_media_type - Sets media type
375 * @pi: port information structure
376 *
377 * Set ice_port_info PHY media type based on PHY type. This should be called
378 * from Get PHY caps with media.
379 */
380 static void ice_set_media_type(struct ice_port_info *pi)
381 {
382 enum ice_media_type *media_type;
383 u64 phy_type_high, phy_type_low;
384
385 phy_type_high = pi->phy.phy_type_high;
386 phy_type_low = pi->phy.phy_type_low;
387 media_type = &pi->phy.media_type;
388
389 /* if no media, then media type is NONE */
390 if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
391 *media_type = ICE_MEDIA_NONE;
392 /* else if PHY types are only BASE-T, then media type is BASET */
393 else if (ice_phy_maps_to_media(phy_type_low, phy_type_high,
394 ICE_MEDIA_BASET_PHY_TYPE_LOW_M, 0))
395 *media_type = ICE_MEDIA_BASET;
396 /* else if any PHY type is BACKPLANE, then media type is BACKPLANE */
397 else if (phy_type_low & ICE_MEDIA_BP_PHY_TYPE_LOW_M ||
398 phy_type_high & ICE_MEDIA_BP_PHY_TYPE_HIGH_M)
399 *media_type = ICE_MEDIA_BACKPLANE;
400 /* else if PHY types are only optical, or optical and C2M, then media
401 * type is FIBER
402 */
403 else if (ice_phy_maps_to_media(phy_type_low, phy_type_high,
404 ICE_MEDIA_OPT_PHY_TYPE_LOW_M, 0) ||
405 (phy_type_low & ICE_MEDIA_OPT_PHY_TYPE_LOW_M &&
406 phy_type_low & ICE_MEDIA_C2M_PHY_TYPE_LOW_M))
407 *media_type = ICE_MEDIA_FIBER;
408 /* else if PHY types are only DA, or DA and C2C, then media type DA */
409 else if (ice_phy_maps_to_media(phy_type_low, phy_type_high,
410 ICE_MEDIA_DAC_PHY_TYPE_LOW_M, 0) ||
411 (phy_type_low & ICE_MEDIA_DAC_PHY_TYPE_LOW_M &&
412 (phy_type_low & ICE_MEDIA_C2C_PHY_TYPE_LOW_M ||
413 phy_type_high & ICE_MEDIA_C2C_PHY_TYPE_HIGH_M)))
414 *media_type = ICE_MEDIA_DA;
415 /* else if PHY types are only C2M or only C2C, then media is AUI */
416 else if (ice_phy_maps_to_media(phy_type_low, phy_type_high,
417 ICE_MEDIA_C2M_PHY_TYPE_LOW_M,
418 ICE_MEDIA_C2M_PHY_TYPE_HIGH_M) ||
419 ice_phy_maps_to_media(phy_type_low, phy_type_high,
420 ICE_MEDIA_C2C_PHY_TYPE_LOW_M,
421 ICE_MEDIA_C2C_PHY_TYPE_HIGH_M))
422 *media_type = ICE_MEDIA_AUI;
423
424 else
425 *media_type = ICE_MEDIA_UNKNOWN;
426 }
427
428 /**
429 * ice_aq_get_phy_caps - returns PHY capabilities
430 * @pi: port information structure
431 * @qual_mods: report qualified modules
432 * @report_mode: report mode capabilities
433 * @pcaps: structure for PHY capabilities to be filled
434 * @cd: pointer to command details structure or NULL
435 *
436 * Returns the various PHY capabilities supported on the Port (0x0600)
437 */
438 enum ice_status
439 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
440 struct ice_aqc_get_phy_caps_data *pcaps,
441 struct ice_sq_cd *cd)
442 {
443 struct ice_aqc_get_phy_caps *cmd;
444 u16 pcaps_size = sizeof(*pcaps);
445 struct ice_aq_desc desc;
446 enum ice_status status;
447 const char *prefix;
448 struct ice_hw *hw;
449
450 cmd = &desc.params.get_phy;
451
452 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
453 return ICE_ERR_PARAM;
454 hw = pi->hw;
455
456 if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
457 !ice_fw_supports_report_dflt_cfg(hw))
458 return ICE_ERR_PARAM;
459
460 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
461
462 if (qual_mods)
463 cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);
464
465 cmd->param0 |= CPU_TO_LE16(report_mode);
466
467 status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
468
469 ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");
470
471 switch (report_mode) {
472 case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
473 prefix = "phy_caps_media";
474 break;
475 case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
476 prefix = "phy_caps_no_media";
477 break;
478 case ICE_AQC_REPORT_ACTIVE_CFG:
479 prefix = "phy_caps_active";
480 break;
481 case ICE_AQC_REPORT_DFLT_CFG:
482 prefix = "phy_caps_default";
483 break;
484 default:
485 prefix = "phy_caps_invalid";
486 }
487
488 ice_dump_phy_type(hw, LE64_TO_CPU(pcaps->phy_type_low),
489 LE64_TO_CPU(pcaps->phy_type_high), prefix);
490
491 ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
492 prefix, report_mode);
493 ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
494 ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
495 pcaps->low_power_ctrl_an);
496 ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
497 pcaps->eee_cap);
498 ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
499 pcaps->eeer_value);
500 ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
501 pcaps->link_fec_options);
502 ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
503 prefix, pcaps->module_compliance_enforcement);
504 ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
505 prefix, pcaps->extended_compliance_code);
506 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
507 pcaps->module_type[0]);
508 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
509 pcaps->module_type[1]);
510 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
511 pcaps->module_type[2]);
512
513 if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
514 pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
515 pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
516 ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
517 sizeof(pi->phy.link_info.module_type),
518 ICE_NONDMA_TO_NONDMA);
519 ice_set_media_type(pi);
520 ice_debug(hw, ICE_DBG_LINK, "%s: media_type = 0x%x\n", prefix,
521 pi->phy.media_type);
522 }
523
524 return status;
525 }
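
/* Usage sketch (illustrative): querying the currently active PHY
 * configuration for a port. The stack allocation is only for brevity here;
 * driver code such as ice_init_hw() below uses ice_malloc() instead:
 *
 *	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
 *	enum ice_status status;
 *	u64 phy_type_low;
 *
 *	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
 *				     &pcaps, NULL);
 *	if (!status)
 *		phy_type_low = LE64_TO_CPU(pcaps.phy_type_low);
 */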
526
527 /**
528 * ice_aq_get_netlist_node
529 * @hw: pointer to the hw struct
530 * @cmd: get_link_topo AQ structure
531 * @node_part_number: output node part number if node found
532 * @node_handle: output node handle parameter if node found
533 */
534 enum ice_status
535 ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
536 u8 *node_part_number, u16 *node_handle)
537 {
538 struct ice_aq_desc desc;
539
540 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
541 desc.params.get_link_topo = *cmd;
542
543 if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
544 return ICE_ERR_NOT_SUPPORTED;
545
546 if (node_handle)
547 *node_handle =
548 LE16_TO_CPU(desc.params.get_link_topo.addr.handle);
549 if (node_part_number)
550 *node_part_number = desc.params.get_link_topo.node_part_num;
551
552 return ICE_SUCCESS;
553 }
554
555 #define MAX_NETLIST_SIZE 10
556 /**
557 * ice_find_netlist_node
558 * @hw: pointer to the hw struct
559 * @node_type_ctx: type of netlist node to look for
560 * @node_part_number: node part number to look for
561 * @node_handle: output parameter if node found - optional
562 *
563 * Find and return the node handle for a given node type and part number in the
564 * netlist. Returns ICE_SUCCESS when the node is found, ICE_ERR_DOES_NOT_EXIST
565 * otherwise. If node_handle is provided, it is set to the found node's handle.
566 */
567 enum ice_status
568 ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
569 u16 *node_handle)
570 {
571 struct ice_aqc_get_link_topo cmd;
572 u8 rec_node_part_number;
573 u16 rec_node_handle;
574 u8 idx;
575
576 for (idx = 0; idx < MAX_NETLIST_SIZE; idx++) {
577 enum ice_status status;
578
579 memset(&cmd, 0, sizeof(cmd));
580
581 cmd.addr.topo_params.node_type_ctx =
582 (node_type_ctx << ICE_AQC_LINK_TOPO_NODE_TYPE_S);
583 cmd.addr.topo_params.index = idx;
584
585 status = ice_aq_get_netlist_node(hw, &cmd,
586 &rec_node_part_number,
587 &rec_node_handle);
588 if (status)
589 return status;
590
591 if (rec_node_part_number == node_part_number) {
592 if (node_handle)
593 *node_handle = rec_node_handle;
594 return ICE_SUCCESS;
595 }
596 }
597
598 return ICE_ERR_DOES_NOT_EXIST;
599 }
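
/* Usage sketch (illustrative): probing for a device in the netlist. The
 * EXAMPLE_* identifiers below are placeholders, not definitions from this
 * driver; real callers pass a node type/context and part number, typically
 * defined in ice_adminq_cmd.h:
 *
 *	u16 handle;
 *
 *	if (ice_find_netlist_node(hw, EXAMPLE_NODE_TYPE_CTX,
 *				  EXAMPLE_NODE_PART_NUMBER,
 *				  &handle) == ICE_SUCCESS)
 *		handle now identifies the matching node
 */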
600
601 #define ice_get_link_status_datalen(hw) ICE_GET_LINK_STATUS_DATALEN_V1
602
603 /**
604 * ice_aq_get_link_info
605 * @pi: port information structure
606 * @ena_lse: enable/disable LinkStatusEvent reporting
607 * @link: pointer to link status structure - optional
608 * @cd: pointer to command details structure or NULL
609 *
610 * Get Link Status (0x607). Returns the link status of the adapter.
611 */
612 enum ice_status
613 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
614 struct ice_link_status *link, struct ice_sq_cd *cd)
615 {
616 struct ice_aqc_get_link_status_data link_data = { 0 };
617 struct ice_aqc_get_link_status *resp;
618 struct ice_link_status *li_old, *li;
619 struct ice_fc_info *hw_fc_info;
620 bool tx_pause, rx_pause;
621 struct ice_aq_desc desc;
622 enum ice_status status;
623 struct ice_hw *hw;
624 u16 cmd_flags;
625
626 if (!pi)
627 return ICE_ERR_PARAM;
628 hw = pi->hw;
629
630 li_old = &pi->phy.link_info_old;
631 li = &pi->phy.link_info;
632 hw_fc_info = &pi->fc;
633
634 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
635 cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
636 resp = &desc.params.get_link_status;
637 resp->cmd_flags = CPU_TO_LE16(cmd_flags);
638 resp->lport_num = pi->lport;
639
640 status = ice_aq_send_cmd(hw, &desc, &link_data,
641 ice_get_link_status_datalen(hw), cd);
642 if (status != ICE_SUCCESS)
643 return status;
644
645 /* save off old link status information */
646 *li_old = *li;
647
648 /* update current link status information */
649 li->link_speed = LE16_TO_CPU(link_data.link_speed);
650 li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
651 li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
652 li->link_info = link_data.link_info;
653 li->link_cfg_err = link_data.link_cfg_err;
654 li->an_info = link_data.an_info;
655 li->ext_info = link_data.ext_info;
656 li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
657 li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
658 li->topo_media_conflict = link_data.topo_media_conflict;
659 li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
660 ICE_AQ_CFG_PACING_TYPE_M);
661
662 /* update fc info */
663 tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
664 rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
665 if (tx_pause && rx_pause)
666 hw_fc_info->current_mode = ICE_FC_FULL;
667 else if (tx_pause)
668 hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
669 else if (rx_pause)
670 hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
671 else
672 hw_fc_info->current_mode = ICE_FC_NONE;
673
674 li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));
675
676 ice_debug(hw, ICE_DBG_LINK, "get link info\n");
677 ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
678 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
679 (unsigned long long)li->phy_type_low);
680 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
681 (unsigned long long)li->phy_type_high);
682 ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
683 ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
684 ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
685 ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
686 ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
687 ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
688 ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
689 li->max_frame_size);
690 ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);
691
692 /* save link status information */
693 if (link)
694 *link = *li;
695
696 /* flag cleared so calling functions don't call AQ again */
697 pi->phy.get_link_info = false;
698
699 return ICE_SUCCESS;
700 }
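
/* Usage sketch (illustrative): fetching the current link state while also
 * enabling link status event reporting, assuming the usual ICE_AQ_LINK_UP
 * link_info flag:
 *
 *	struct ice_link_status link;
 *	enum ice_status status;
 *
 *	status = ice_aq_get_link_info(pi, true, &link, NULL);
 *	if (!status && (link.link_info & ICE_AQ_LINK_UP))
 *		link.link_speed holds the negotiated speed
 */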
701
702 /**
703 * ice_fill_tx_timer_and_fc_thresh
704 * @hw: pointer to the HW struct
705 * @cmd: pointer to MAC cfg structure
706 *
707 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
708 * descriptor
709 */
710 static void
711 ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
712 struct ice_aqc_set_mac_cfg *cmd)
713 {
714 u16 fc_thres_val, tx_timer_val;
715 u32 val;
716
717 /* We read back the transmit timer and fc threshold value of
718 * LFC. Thus, we will use index =
719 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
720 *
721 * Also, because we are operating on transmit timer and fc
722 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
723 */
724 #define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
725
726 /* Retrieve the transmit timer */
727 val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
728 tx_timer_val = val &
729 PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
730 cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
731
732 /* Retrieve the fc threshold */
733 val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
734 fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
735
736 cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
737 }
738
739 /**
740 * ice_aq_set_mac_cfg
741 * @hw: pointer to the HW struct
742 * @max_frame_size: Maximum Frame Size to be supported
743 * @auto_drop: Tell HW to drop packets if TC queue is blocked
744 * @cd: pointer to command details structure or NULL
745 *
746 * Set MAC configuration (0x0603)
747 */
748 enum ice_status
749 ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, bool auto_drop,
750 struct ice_sq_cd *cd)
751 {
752 struct ice_aqc_set_mac_cfg *cmd;
753 struct ice_aq_desc desc;
754
755 cmd = &desc.params.set_mac_cfg;
756
757 if (max_frame_size == 0)
758 return ICE_ERR_PARAM;
759
760 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
761
762 cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
763
764 if (ice_is_fw_auto_drop_supported(hw) && auto_drop)
765 cmd->drop_opts |= ICE_AQ_SET_MAC_AUTO_DROP_BLOCKING_PKTS;
766 ice_fill_tx_timer_and_fc_thresh(hw, cmd);
767
768 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
769 }
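
/* Usage sketch: ice_init_hw() below calls this to enable jumbo frames at the
 * MAC level:
 *
 *	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, false,
 *				    NULL);
 */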
770
771 /**
772 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
773 * @hw: pointer to the HW struct
774 */
775 static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
776 {
777 struct ice_switch_info *sw;
778 enum ice_status status;
779
780 hw->switch_info = (struct ice_switch_info *)
781 ice_malloc(hw, sizeof(*hw->switch_info));
782
783 sw = hw->switch_info;
784
785 if (!sw)
786 return ICE_ERR_NO_MEMORY;
787
788 INIT_LIST_HEAD(&sw->vsi_list_map_head);
789 sw->prof_res_bm_init = 0;
790
791 status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
792 if (status) {
793 ice_free(hw, hw->switch_info);
794 return status;
795 }
796 return ICE_SUCCESS;
797 }
798
799 /**
800 * ice_cleanup_fltr_mgmt_single - clears a single filter management struct
801 * @hw: pointer to the HW struct
802 * @sw: pointer to switch info struct for which function clears filters
803 */
804 static void
805 ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
806 {
807 struct ice_vsi_list_map_info *v_pos_map;
808 struct ice_vsi_list_map_info *v_tmp_map;
809 struct ice_sw_recipe *recps;
810 u8 i;
811
812 if (!sw)
813 return;
814
815 LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
816 ice_vsi_list_map_info, list_entry) {
817 LIST_DEL(&v_pos_map->list_entry);
818 ice_free(hw, v_pos_map);
819 }
820 recps = sw->recp_list;
821 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
822 struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
823
824 recps[i].root_rid = i;
825 LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
826 &recps[i].rg_list, ice_recp_grp_entry,
827 l_entry) {
828 LIST_DEL(&rg_entry->l_entry);
829 ice_free(hw, rg_entry);
830 }
831
832 if (recps[i].adv_rule) {
833 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
834 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
835
836 ice_destroy_lock(&recps[i].filt_rule_lock);
837 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
838 &recps[i].filt_rules,
839 ice_adv_fltr_mgmt_list_entry,
840 list_entry) {
841 LIST_DEL(&lst_itr->list_entry);
842 ice_free(hw, lst_itr->lkups);
843 ice_free(hw, lst_itr);
844 }
845 } else {
846 struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
847
848 ice_destroy_lock(&recps[i].filt_rule_lock);
849 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
850 &recps[i].filt_rules,
851 ice_fltr_mgmt_list_entry,
852 list_entry) {
853 LIST_DEL(&lst_itr->list_entry);
854 ice_free(hw, lst_itr);
855 }
856 }
857 if (recps[i].root_buf)
858 ice_free(hw, recps[i].root_buf);
859 }
860 ice_rm_sw_replay_rule_info(hw, sw);
861 ice_free(hw, sw->recp_list);
862 ice_free(hw, sw);
863 }
864
865 /**
866 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
867 * @hw: pointer to the HW struct
868 */
869 static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
870 {
871 ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
872 }
873
874 /**
875 * ice_get_itr_intrl_gran
876 * @hw: pointer to the HW struct
877 *
878 * Determines the ITR/INTRL granularities based on the maximum aggregate
879 * bandwidth according to the device's configuration during power-on.
880 */
881 static void ice_get_itr_intrl_gran(struct ice_hw *hw)
882 {
883 u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
884 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
885 GL_PWR_MODE_CTL_CAR_MAX_BW_S;
886
887 switch (max_agg_bw) {
888 case ICE_MAX_AGG_BW_200G:
889 case ICE_MAX_AGG_BW_100G:
890 case ICE_MAX_AGG_BW_50G:
891 hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
892 hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
893 break;
894 case ICE_MAX_AGG_BW_25G:
895 hw->itr_gran = ICE_ITR_GRAN_MAX_25;
896 hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
897 break;
898 }
899 }
900
901 /**
902 * ice_print_rollback_msg - print FW rollback message
903 * @hw: pointer to the hardware structure
904 */
905 void ice_print_rollback_msg(struct ice_hw *hw)
906 {
907 char nvm_str[ICE_NVM_VER_LEN] = { 0 };
908 struct ice_orom_info *orom;
909 struct ice_nvm_info *nvm;
910
911 orom = &hw->flash.orom;
912 nvm = &hw->flash.nvm;
913
914 SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
915 nvm->major, nvm->minor, nvm->eetrack, orom->major,
916 orom->build, orom->patch);
917 ice_warn(hw,
918 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
919 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
920 }
921
922 /**
923 * ice_set_umac_shared
924 * @hw: pointer to the hw struct
925 *
926 * Set boolean flag to allow unicast MAC sharing
927 */
928 void ice_set_umac_shared(struct ice_hw *hw)
929 {
930 hw->umac_shared = true;
931 }
932
933 /**
934 * ice_init_hw - main hardware initialization routine
935 * @hw: pointer to the hardware structure
936 */
937 enum ice_status ice_init_hw(struct ice_hw *hw)
938 {
939 struct ice_aqc_get_phy_caps_data *pcaps;
940 enum ice_status status;
941 u16 mac_buf_len;
942 void *mac_buf;
943
944 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
945
946 /* Set MAC type based on DeviceID */
947 status = ice_set_mac_type(hw);
948 if (status)
949 return status;
950
951 hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
952 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
953 PF_FUNC_RID_FUNCTION_NUMBER_S;
954
955 status = ice_reset(hw, ICE_RESET_PFR);
956 if (status)
957 return status;
958 ice_get_itr_intrl_gran(hw);
959
960 status = ice_create_all_ctrlq(hw);
961 if (status)
962 goto err_unroll_cqinit;
963
964 ice_fwlog_set_support_ena(hw);
965 status = ice_fwlog_set(hw, &hw->fwlog_cfg);
966 if (status) {
967 ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging, status %d.\n",
968 status);
969 } else {
970 if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_REGISTER_ON_INIT) {
971 status = ice_fwlog_register(hw);
972 if (status)
973 ice_debug(hw, ICE_DBG_INIT, "Failed to register for FW logging events, status %d.\n",
974 status);
975 } else {
976 status = ice_fwlog_unregister(hw);
977 if (status)
978 ice_debug(hw, ICE_DBG_INIT, "Failed to unregister for FW logging events, status %d.\n",
979 status);
980 }
981 }
982
983 status = ice_init_nvm(hw);
984 if (status)
985 goto err_unroll_cqinit;
986
987 if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
988 ice_print_rollback_msg(hw);
989
990 status = ice_clear_pf_cfg(hw);
991 if (status)
992 goto err_unroll_cqinit;
993
994 ice_clear_pxe_mode(hw);
995
996 status = ice_get_caps(hw);
997 if (status)
998 goto err_unroll_cqinit;
999
1000 if (!hw->port_info)
1001 hw->port_info = (struct ice_port_info *)
1002 ice_malloc(hw, sizeof(*hw->port_info));
1003 if (!hw->port_info) {
1004 status = ICE_ERR_NO_MEMORY;
1005 goto err_unroll_cqinit;
1006 }
1007
1008 /* set the back pointer to HW */
1009 hw->port_info->hw = hw;
1010
1011 /* Initialize port_info struct with switch configuration data */
1012 status = ice_get_initial_sw_cfg(hw);
1013 if (status)
1014 goto err_unroll_alloc;
1015
1016 hw->evb_veb = true;
1017 /* Query the allocated resources for Tx scheduler */
1018 status = ice_sched_query_res_alloc(hw);
1019 if (status) {
1020 ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
1021 goto err_unroll_alloc;
1022 }
1023 ice_sched_get_psm_clk_freq(hw);
1024
1025 /* Initialize port_info struct with scheduler data */
1026 status = ice_sched_init_port(hw->port_info);
1027 if (status)
1028 goto err_unroll_sched;
1029 pcaps = (struct ice_aqc_get_phy_caps_data *)
1030 ice_malloc(hw, sizeof(*pcaps));
1031 if (!pcaps) {
1032 status = ICE_ERR_NO_MEMORY;
1033 goto err_unroll_sched;
1034 }
1035
1036 /* Initialize port_info struct with PHY capabilities */
1037 status = ice_aq_get_phy_caps(hw->port_info, false,
1038 ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL);
1039 ice_free(hw, pcaps);
1040 if (status)
1041 ice_warn(hw, "Get PHY capabilities failed status = %d, continuing anyway\n",
1042 status);
1043
1044 /* Initialize port_info struct with link information */
1045 status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
1046 if (status)
1047 goto err_unroll_sched;
1048 /* need a valid SW entry point to build a Tx tree */
1049 if (!hw->sw_entry_point_layer) {
1050 ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
1051 status = ICE_ERR_CFG;
1052 goto err_unroll_sched;
1053 }
1054 INIT_LIST_HEAD(&hw->agg_list);
1055 /* Initialize max burst size */
1056 if (!hw->max_burst_size)
1057 ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
1058 status = ice_init_fltr_mgmt_struct(hw);
1059 if (status)
1060 goto err_unroll_sched;
1061
1062 /* Get MAC information */
1063
1064 /* A single port can report up to two (LAN and WoL) addresses */
1065 mac_buf = ice_calloc(hw, 2,
1066 sizeof(struct ice_aqc_manage_mac_read_resp));
1067 mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
1068
1069 if (!mac_buf) {
1070 status = ICE_ERR_NO_MEMORY;
1071 goto err_unroll_fltr_mgmt_struct;
1072 }
1073
1074 status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
1075 ice_free(hw, mac_buf);
1076
1077 if (status)
1078 goto err_unroll_fltr_mgmt_struct;
1079
1080 /* enable jumbo frame support at MAC level */
1081 status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, false,
1082 NULL);
1083 if (status)
1084 goto err_unroll_fltr_mgmt_struct;
1085
1086 status = ice_init_hw_tbls(hw);
1087 if (status)
1088 goto err_unroll_fltr_mgmt_struct;
1089 ice_init_lock(&hw->tnl_lock);
1090
1091 return ICE_SUCCESS;
1092
1093 err_unroll_fltr_mgmt_struct:
1094 ice_cleanup_fltr_mgmt_struct(hw);
1095 err_unroll_sched:
1096 ice_sched_cleanup_all(hw);
1097 err_unroll_alloc:
1098 ice_free(hw, hw->port_info);
1099 hw->port_info = NULL;
1100 err_unroll_cqinit:
1101 ice_destroy_all_ctrlq(hw);
1102 return status;
1103 }
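
/* Usage sketch (illustrative): a probe/attach path is expected to pair this
 * with ice_deinit_hw(); the surrounding flow here is hypothetical:
 *
 *	status = ice_init_hw(hw);
 *	if (status)
 *		return status;
 *
 *	... normal operation ...
 *
 *	ice_deinit_hw(hw);
 */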
1104
1105 /**
1106 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
1107 * @hw: pointer to the hardware structure
1108 *
1109 * This should be called only during nominal operation, not as a result of
1110 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
1111 * applicable initializations if it fails for any reason.
1112 */
1113 void ice_deinit_hw(struct ice_hw *hw)
1114 {
1115 ice_cleanup_fltr_mgmt_struct(hw);
1116
1117 ice_sched_cleanup_all(hw);
1118 ice_sched_clear_agg(hw);
1119 ice_free_seg(hw);
1120 ice_free_hw_tbls(hw);
1121 ice_destroy_lock(&hw->tnl_lock);
1122
1123 if (hw->port_info) {
1124 ice_free(hw, hw->port_info);
1125 hw->port_info = NULL;
1126 }
1127
1128 ice_destroy_all_ctrlq(hw);
1129
1130 /* Clear VSI contexts if not already cleared */
1131 ice_clear_all_vsi_ctx(hw);
1132 }
1133
1134 /**
1135 * ice_check_reset - Check to see if a global reset is complete
1136 * @hw: pointer to the hardware structure
1137 */
1138 enum ice_status ice_check_reset(struct ice_hw *hw)
1139 {
1140 u32 cnt, reg = 0, grst_timeout, uld_mask, reset_wait_cnt;
1141
1142 /* Poll for Device Active state in case a recent CORER, GLOBR,
1143 * or EMPR has occurred. The grst delay value is in 100ms units.
1144 * Add 1sec for outstanding AQ commands that can take a long time.
1145 */
1146 grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
1147 GLGEN_RSTCTL_GRSTDEL_S) + 10;
1148
1149 for (cnt = 0; cnt < grst_timeout; cnt++) {
1150 ice_msec_delay(100, true);
1151 reg = rd32(hw, GLGEN_RSTAT);
1152 if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
1153 break;
1154 }
1155
1156 if (cnt == grst_timeout) {
1157 ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
1158 return ICE_ERR_RESET_FAILED;
1159 }
1160
1161 #define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
1162 GLNVM_ULD_PCIER_DONE_1_M |\
1163 GLNVM_ULD_CORER_DONE_M |\
1164 GLNVM_ULD_GLOBR_DONE_M |\
1165 GLNVM_ULD_POR_DONE_M |\
1166 GLNVM_ULD_POR_DONE_1_M |\
1167 GLNVM_ULD_PCIER_DONE_2_M)
1168
1169 uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.iwarp ?
1170 GLNVM_ULD_PE_DONE_M : 0);
1171
1172 reset_wait_cnt = ICE_PF_RESET_WAIT_COUNT;
1173
1174 /* Device is Active; check Global Reset processes are done */
1175 for (cnt = 0; cnt < reset_wait_cnt; cnt++) {
1176 reg = rd32(hw, GLNVM_ULD) & uld_mask;
1177 if (reg == uld_mask) {
1178 ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
1179 break;
1180 }
1181 ice_msec_delay(10, true);
1182 }
1183
1184 if (cnt == reset_wait_cnt) {
1185 ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
1186 reg);
1187 return ICE_ERR_RESET_FAILED;
1188 }
1189
1190 return ICE_SUCCESS;
1191 }
1192
1193 /**
1194 * ice_pf_reset - Reset the PF
1195 * @hw: pointer to the hardware structure
1196 *
1197 * If a global reset has been triggered, this function checks
1198 * for its completion and then issues the PF reset
1199 */
1200 static enum ice_status ice_pf_reset(struct ice_hw *hw)
1201 {
1202 u32 cnt, reg, reset_wait_cnt, cfg_lock_timeout;
1203
1204 /* If at function entry a global reset was already in progress, i.e.
1205 * state is not 'device active' or any of the reset done bits are not
1206 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
1207 * global reset is done.
1208 */
1209 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
1210 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
1211 /* poll on global reset currently in progress until done */
1212 if (ice_check_reset(hw))
1213 return ICE_ERR_RESET_FAILED;
1214
1215 return ICE_SUCCESS;
1216 }
1217
1218 /* Reset the PF */
1219 reg = rd32(hw, PFGEN_CTRL);
1220
1221 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
1222
1223 /* Wait for the PFR to complete. The wait time is the global config lock
1224 * timeout plus the PFR timeout which will account for a possible reset
1225 * that is occurring during a download package operation.
1226 */
1227 reset_wait_cnt = ICE_PF_RESET_WAIT_COUNT;
1228 cfg_lock_timeout = ICE_GLOBAL_CFG_LOCK_TIMEOUT;
1229
1230 for (cnt = 0; cnt < cfg_lock_timeout + reset_wait_cnt; cnt++) {
1231 reg = rd32(hw, PFGEN_CTRL);
1232 if (!(reg & PFGEN_CTRL_PFSWR_M))
1233 break;
1234
1235 ice_msec_delay(1, true);
1236 }
1237
1238 if (cnt == cfg_lock_timeout + reset_wait_cnt) {
1239 ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
1240 return ICE_ERR_RESET_FAILED;
1241 }
1242
1243 return ICE_SUCCESS;
1244 }
1245
1246 /**
1247 * ice_reset - Perform different types of reset
1248 * @hw: pointer to the hardware structure
1249 * @req: reset request
1250 *
1251 * This function triggers a reset as specified by the req parameter.
1252 *
1253 * Note:
1254 * If anything other than a PF reset is triggered, PXE mode is restored.
1255 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
1256 * interface has been restored in the rebuild flow.
1257 */
1258 enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1259 {
1260 u32 val = 0;
1261
1262 switch (req) {
1263 case ICE_RESET_PFR:
1264 return ice_pf_reset(hw);
1265 case ICE_RESET_CORER:
1266 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1267 val = GLGEN_RTRIG_CORER_M;
1268 break;
1269 case ICE_RESET_GLOBR:
1270 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1271 val = GLGEN_RTRIG_GLOBR_M;
1272 break;
1273 default:
1274 return ICE_ERR_PARAM;
1275 }
1276
1277 val |= rd32(hw, GLGEN_RTRIG);
1278 wr32(hw, GLGEN_RTRIG, val);
1279 ice_flush(hw);
1280
1281 /* wait for the FW to be ready */
1282 return ice_check_reset(hw);
1283 }
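
/* Usage sketch: ice_init_hw() above issues a PF reset as
 *
 *	status = ice_reset(hw, ICE_RESET_PFR);
 *
 * For ICE_RESET_CORER/ICE_RESET_GLOBR requests, PXE mode is restored and must
 * be cleared again with ice_clear_pxe_mode() once the AQ interface has been
 * rebuilt, per the note above.
 */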
1284
1285 /**
1286 * ice_copy_rxq_ctx_to_hw
1287 * @hw: pointer to the hardware structure
1288 * @ice_rxq_ctx: pointer to the rxq context
1289 * @rxq_index: the index of the Rx queue
1290 *
1291 * Copies rxq context from dense structure to HW register space
1292 */
1293 static enum ice_status
1294 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1295 {
1296 u8 i;
1297
1298 if (!ice_rxq_ctx)
1299 return ICE_ERR_BAD_PTR;
1300
1301 if (rxq_index > QRX_CTRL_MAX_INDEX)
1302 return ICE_ERR_PARAM;
1303
1304 /* Copy each dword separately to HW */
1305 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1306 wr32(hw, QRX_CONTEXT(i, rxq_index),
1307 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1308
1309 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1310 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1311 }
1312
1313 return ICE_SUCCESS;
1314 }
1315
1316 /**
1317 * ice_copy_rxq_ctx_from_hw - Copy rxq context register from HW
1318 * @hw: pointer to the hardware structure
1319 * @ice_rxq_ctx: pointer to the rxq context
1320 * @rxq_index: the index of the Rx queue
1321 *
1322 * Copies rxq context from HW register space to dense structure
1323 */
1324 static enum ice_status
1325 ice_copy_rxq_ctx_from_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1326 {
1327 u8 i;
1328
1329 if (!ice_rxq_ctx)
1330 return ICE_ERR_BAD_PTR;
1331
1332 if (rxq_index > QRX_CTRL_MAX_INDEX)
1333 return ICE_ERR_PARAM;
1334
1335 /* Copy each dword separately from HW */
1336 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1337 u32 *ctx = (u32 *)(ice_rxq_ctx + (i * sizeof(u32)));
1338
1339 *ctx = rd32(hw, QRX_CONTEXT(i, rxq_index));
1340
1341 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, *ctx);
1342 }
1343
1344 return ICE_SUCCESS;
1345 }
1346
1347 /* LAN Rx Queue Context */
1348 static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
1349 /* Field Width LSB */
1350 ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
1351 ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
1352 ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
1353 ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
1354 ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102),
1355 ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109),
1356 ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114),
1357 ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116),
1358 ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117),
1359 ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119),
1360 ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120),
1361 ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124),
1362 ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127),
1363 ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174),
1364 ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193),
1365 ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194),
1366 ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
1367 ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
1368 ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
1369 ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
1370 { 0 }
1371 };
1372
1373 /**
1374 * ice_write_rxq_ctx
1375 * @hw: pointer to the hardware structure
1376 * @rlan_ctx: pointer to the rxq context
1377 * @rxq_index: the index of the Rx queue
1378 *
1379 * Converts rxq context from sparse to dense structure and then writes
1380 * it to HW register space and enables the hardware to prefetch descriptors
1381 * instead of only fetching them on demand
1382 */
1383 enum ice_status
1384 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1385 u32 rxq_index)
1386 {
1387 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1388
1389 if (!rlan_ctx)
1390 return ICE_ERR_BAD_PTR;
1391
1392 rlan_ctx->prefena = 1;
1393
1394 ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1395 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1396 }
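
/* Usage sketch (illustrative): a queue setup path fills the sparse context
 * and lets ice_write_rxq_ctx() pack and program it. The field values below
 * are placeholders only, not recommended settings:
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_phys_addr >> 7;
 *	rlan_ctx.qlen = ring_len;
 *	rlan_ctx.dbuf = rx_buf_size >> 7;
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 *
 * (the >> 7 shifts reflect base/dbuf being expressed in 128-byte units; treat
 * the exact encoding as an assumption to verify against the datasheet)
 */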
1397
1398 /**
1399 * ice_read_rxq_ctx - Read rxq context from HW
1400 * @hw: pointer to the hardware structure
1401 * @rlan_ctx: pointer to the rxq context
1402 * @rxq_index: the index of the Rx queue
1403 *
1404 * Reads the rxq context from HW register space and then converts it from the
1405 * dense structure to the sparse one
1406 */
1407 enum ice_status
1408 ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1409 u32 rxq_index)
1410 {
1411 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1412 enum ice_status status;
1413
1414 if (!rlan_ctx)
1415 return ICE_ERR_BAD_PTR;
1416
1417 status = ice_copy_rxq_ctx_from_hw(hw, ctx_buf, rxq_index);
1418 if (status)
1419 return status;
1420
1421 return ice_get_ctx(ctx_buf, (u8 *)rlan_ctx, ice_rlan_ctx_info);
1422 }
1423
1424 /**
1425 * ice_clear_rxq_ctx
1426 * @hw: pointer to the hardware structure
1427 * @rxq_index: the index of the Rx queue to clear
1428 *
1429 * Clears rxq context in HW register space
1430 */
1431 enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
1432 {
1433 u8 i;
1434
1435 if (rxq_index > QRX_CTRL_MAX_INDEX)
1436 return ICE_ERR_PARAM;
1437
1438 /* Clear each dword register separately */
1439 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
1440 wr32(hw, QRX_CONTEXT(i, rxq_index), 0);
1441
1442 return ICE_SUCCESS;
1443 }
1444
1445 /* LAN Tx Queue Context used to set the Tx config by ice_aqc_opc_add_txqs;
1446 * bits 0-175 are valid
1447 */
1448 const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1449 /* Field Width LSB */
1450 ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
1451 ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
1452 ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60),
1453 ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65),
1454 ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68),
1455 ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
1456 ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
1457 ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
1458 ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91),
1459 ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
1460 ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
1461 ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
1462 ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102),
1463 ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103),
1464 ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104),
1465 ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105),
1466 ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114),
1467 ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128),
1468 ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129),
1469 ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
1470 ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148),
1471 ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152),
1472 ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153),
1473 ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164),
1474 ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
1475 ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
1476 ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
1477 ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
1478 { 0 }
1479 };
1480
1481 /**
1482 * ice_copy_tx_cmpltnq_ctx_to_hw
1483 * @hw: pointer to the hardware structure
1484 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
1485 * @tx_cmpltnq_index: the index of the completion queue
1486 *
1487 * Copies Tx completion queue context from dense structure to HW register space
1488 */
1489 static enum ice_status
1490 ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
1491 u32 tx_cmpltnq_index)
1492 {
1493 u8 i;
1494
1495 if (!ice_tx_cmpltnq_ctx)
1496 return ICE_ERR_BAD_PTR;
1497
1498 if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1499 return ICE_ERR_PARAM;
1500
1501 /* Copy each dword separately to HW */
1502 for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
1503 wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
1504 *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1505
1506 ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
1507 *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1508 }
1509
1510 return ICE_SUCCESS;
1511 }
1512
1513 /* LAN Tx Completion Queue Context */
1514 static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
1515 /* Field Width LSB */
1516 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base, 57, 0),
1517 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len, 18, 64),
1518 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation, 1, 96),
1519 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr, 22, 97),
1520 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num, 3, 128),
1521 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num, 10, 131),
1522 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type, 2, 141),
1523 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr, 1, 160),
1524 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid, 8, 161),
1525 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache, 512, 192),
1526 { 0 }
1527 };
1528
1529 /**
1530 * ice_write_tx_cmpltnq_ctx
1531 * @hw: pointer to the hardware structure
1532 * @tx_cmpltnq_ctx: pointer to the completion queue context
1533 * @tx_cmpltnq_index: the index of the completion queue
1534 *
1535 * Converts completion queue context from sparse to dense structure and then
1536 * writes it to HW register space
1537 */
1538 enum ice_status
1539 ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
1540 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
1541 u32 tx_cmpltnq_index)
1542 {
1543 u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1544
1545 ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
1546 return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
1547 }
1548
1549 /**
1550 * ice_clear_tx_cmpltnq_ctx
1551 * @hw: pointer to the hardware structure
1552 * @tx_cmpltnq_index: the index of the completion queue to clear
1553 *
1554 * Clears Tx completion queue context in HW register space
1555 */
1556 enum ice_status
1557 ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
1558 {
1559 u8 i;
1560
1561 if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1562 return ICE_ERR_PARAM;
1563
1564 /* Clear each dword register separately */
1565 for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
1566 wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);
1567
1568 return ICE_SUCCESS;
1569 }
1570
1571 /**
1572 * ice_copy_tx_drbell_q_ctx_to_hw
1573 * @hw: pointer to the hardware structure
1574 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
1575 * @tx_drbell_q_index: the index of the doorbell queue
1576 *
1577 * Copies doorbell queue context from dense structure to HW register space
1578 */
1579 static enum ice_status
1580 ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
1581 u32 tx_drbell_q_index)
1582 {
1583 u8 i;
1584
1585 if (!ice_tx_drbell_q_ctx)
1586 return ICE_ERR_BAD_PTR;
1587
1588 if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1589 return ICE_ERR_PARAM;
1590
1591 /* Copy each dword separately to HW */
1592 for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
1593 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
1594 *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1595
1596 ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
1597 *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1598 }
1599
1600 return ICE_SUCCESS;
1601 }
1602
1603 /* LAN Tx Doorbell Queue Context info */
1604 static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
1605 /* Field Width LSB */
1606 ICE_CTX_STORE(ice_tx_drbell_q_ctx, base, 57, 0),
1607 ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len, 13, 64),
1608 ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num, 3, 80),
1609 ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num, 8, 84),
1610 ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type, 2, 94),
1611 ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid, 8, 96),
1612 ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd, 1, 104),
1613 ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr, 1, 108),
1614 ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en, 1, 112),
1615 ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head, 13, 128),
1616 ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail, 13, 144),
1617 { 0 }
1618 };
1619
1620 /**
1621 * ice_write_tx_drbell_q_ctx
1622 * @hw: pointer to the hardware structure
1623 * @tx_drbell_q_ctx: pointer to the doorbell queue context
1624 * @tx_drbell_q_index: the index of the doorbell queue
1625 *
1626 * Converts doorbell queue context from sparse to dense structure and then
1627 * writes it to HW register space
1628 */
1629 enum ice_status
1630 ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
1631 struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
1632 u32 tx_drbell_q_index)
1633 {
1634 u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1635
1636 ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf,
1637 ice_tx_drbell_q_ctx_info);
1638 return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
1639 }
1640
1641 /**
1642 * ice_clear_tx_drbell_q_ctx
1643 * @hw: pointer to the hardware structure
1644 * @tx_drbell_q_index: the index of the doorbell queue to clear
1645 *
1646 * Clears doorbell queue context in HW register space
1647 */
1648 enum ice_status
1649 ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
1650 {
1651 u8 i;
1652
1653 if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1654 return ICE_ERR_PARAM;
1655
1656 /* Clear each dword register separately */
1657 for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
1658 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);
1659
1660 return ICE_SUCCESS;
1661 }
1662
1663 /* FW Admin Queue command wrappers */
1664
1665 /**
1666 * ice_should_retry_sq_send_cmd
1667 * @opcode: AQ opcode
1668 *
1669 * Decide if we should retry the send command routine for the ATQ, depending
1670 * on the opcode.
1671 */
1672 static bool ice_should_retry_sq_send_cmd(u16 opcode)
1673 {
1674 switch (opcode) {
1675 case ice_aqc_opc_dnl_get_status:
1676 case ice_aqc_opc_dnl_run:
1677 case ice_aqc_opc_dnl_call:
1678 case ice_aqc_opc_dnl_read_sto:
1679 case ice_aqc_opc_dnl_write_sto:
1680 case ice_aqc_opc_dnl_set_breakpoints:
1681 case ice_aqc_opc_dnl_read_log:
1682 case ice_aqc_opc_get_link_topo:
1683 case ice_aqc_opc_done_alt_write:
1684 case ice_aqc_opc_lldp_stop:
1685 case ice_aqc_opc_lldp_start:
1686 case ice_aqc_opc_lldp_filter_ctrl:
1687 return true;
1688 }
1689
1690 return false;
1691 }
1692
1693 /**
1694 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
1695 * @hw: pointer to the HW struct
1696 * @cq: pointer to the specific Control queue
1697 * @desc: prefilled descriptor describing the command
1698 * @buf: buffer to use for indirect commands (or NULL for direct commands)
1699 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
1700 * @cd: pointer to command details structure
1701 *
1702 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
1703 * Queue if the EBUSY AQ error is returned.
1704 */
1705 static enum ice_status
1706 ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1707 struct ice_aq_desc *desc, void *buf, u16 buf_size,
1708 struct ice_sq_cd *cd)
1709 {
1710 struct ice_aq_desc desc_cpy;
1711 enum ice_status status;
1712 bool is_cmd_for_retry;
1713 u8 *buf_cpy = NULL;
1714 u8 idx = 0;
1715 u16 opcode;
1716
1717 opcode = LE16_TO_CPU(desc->opcode);
1718 is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
1719 ice_memset(&desc_cpy, 0, sizeof(desc_cpy), ICE_NONDMA_MEM);
1720
1721 if (is_cmd_for_retry) {
1722 if (buf) {
1723 buf_cpy = (u8 *)ice_malloc(hw, buf_size);
1724 if (!buf_cpy)
1725 return ICE_ERR_NO_MEMORY;
1726 }
1727
1728 ice_memcpy(&desc_cpy, desc, sizeof(desc_cpy),
1729 ICE_NONDMA_TO_NONDMA);
1730 }
1731
1732 do {
1733 status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
1734
1735 if (!is_cmd_for_retry || status == ICE_SUCCESS ||
1736 hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
1737 break;
1738
1739 if (buf_cpy)
1740 ice_memcpy(buf, buf_cpy, buf_size,
1741 ICE_NONDMA_TO_NONDMA);
1742
1743 ice_memcpy(desc, &desc_cpy, sizeof(desc_cpy),
1744 ICE_NONDMA_TO_NONDMA);
1745
1746 ice_msec_delay(ICE_SQ_SEND_DELAY_TIME_MS, false);
1747
1748 } while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
1749
1750 if (buf_cpy)
1751 ice_free(hw, buf_cpy);
1752
1753 return status;
1754 }
1755
1756 /**
1757 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1758 * @hw: pointer to the HW struct
1759 * @desc: descriptor describing the command
1760 * @buf: buffer to use for indirect commands (NULL for direct commands)
1761 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1762 * @cd: pointer to command details structure
1763 *
1764 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1765 */
1766 enum ice_status
1767 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1768 u16 buf_size, struct ice_sq_cd *cd)
1769 {
1770 return ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
1771 }
1772
1773 /**
1774 * ice_aq_get_fw_ver
1775 * @hw: pointer to the HW struct
1776 * @cd: pointer to command details structure or NULL
1777 *
1778 * Get the firmware version (0x0001) from the admin queue commands
1779 */
1780 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1781 {
1782 struct ice_aqc_get_ver *resp;
1783 struct ice_aq_desc desc;
1784 enum ice_status status;
1785
1786 resp = &desc.params.get_ver;
1787
1788 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1789
1790 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1791
1792 if (!status) {
1793 hw->fw_branch = resp->fw_branch;
1794 hw->fw_maj_ver = resp->fw_major;
1795 hw->fw_min_ver = resp->fw_minor;
1796 hw->fw_patch = resp->fw_patch;
1797 hw->fw_build = LE32_TO_CPU(resp->fw_build);
1798 hw->api_branch = resp->api_branch;
1799 hw->api_maj_ver = resp->api_major;
1800 hw->api_min_ver = resp->api_minor;
1801 hw->api_patch = resp->api_patch;
1802 }
1803
1804 return status;
1805 }
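/* Usage sketch (illustrative only): after a successful call the version
 * fields cached in the ice_hw struct can be reported, e.g.:
 *
 *	if (!ice_aq_get_fw_ver(hw, NULL))
 *		ice_info(hw, "fw %u.%u.%u api %u.%u\n",
 *			 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch,
 *			 hw->api_maj_ver, hw->api_min_ver);
 */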
1806
1807 /**
1808 * ice_aq_send_driver_ver
1809 * @hw: pointer to the HW struct
1810 * @dv: driver's major, minor version
1811 * @cd: pointer to command details structure or NULL
1812 *
1813 * Send the driver version (0x0002) to the firmware
1814 */
1815 enum ice_status
1816 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1817 struct ice_sq_cd *cd)
1818 {
1819 struct ice_aqc_driver_ver *cmd;
1820 struct ice_aq_desc desc;
1821 u16 len;
1822
1823 cmd = &desc.params.driver_ver;
1824
1825 if (!dv)
1826 return ICE_ERR_PARAM;
1827
1828 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1829
1830 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1831 cmd->major_ver = dv->major_ver;
1832 cmd->minor_ver = dv->minor_ver;
1833 cmd->build_ver = dv->build_ver;
1834 cmd->subbuild_ver = dv->subbuild_ver;
1835
1836 len = 0;
1837 while (len < sizeof(dv->driver_string) &&
1838 IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
1839 len++;
1840
1841 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1842 }
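/* Usage sketch (illustrative only; the version numbers and string are
 * placeholders):
 *
 *	struct ice_driver_ver dv = { 0 };
 *
 *	dv.major_ver = 1;
 *	dv.minor_ver = 2;
 *	dv.build_ver = 3;
 *	dv.subbuild_ver = 0;
 *	strncpy((char *)dv.driver_string, "example 1.2.3",
 *		sizeof(dv.driver_string) - 1);
 *	ice_aq_send_driver_ver(hw, &dv, NULL);
 *
 * Note that the buffer sent to FW is truncated at the first NUL or first
 * non-ASCII byte, per the length calculation above.
 */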
1843
1844 /**
1845 * ice_aq_q_shutdown
1846 * @hw: pointer to the HW struct
1847 * @unloading: is the driver unloading itself
1848 *
1849 * Tell the Firmware that we're shutting down the AdminQ and whether
1850 * or not the driver is unloading as well (0x0003).
1851 */
1852 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1853 {
1854 struct ice_aqc_q_shutdown *cmd;
1855 struct ice_aq_desc desc;
1856
1857 cmd = &desc.params.q_shutdown;
1858
1859 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1860
1861 if (unloading)
1862 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1863
1864 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1865 }
1866
1867 /**
1868 * ice_aq_req_res
1869 * @hw: pointer to the HW struct
1870 * @res: resource ID
1871 * @access: access type
1872 * @sdp_number: resource number
1873 * @timeout: the maximum time in ms that the driver may hold the resource
1874 * @cd: pointer to command details structure or NULL
1875 *
1876 * Requests common resource using the admin queue commands (0x0008).
1877 * When attempting to acquire the Global Config Lock, the driver can
1878 * learn of three states:
1879 * 1) ICE_SUCCESS - acquired lock, and can perform download package
1880 * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
1881 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1882 * successfully downloaded the package; the driver does
1883 * not have to download the package and can continue
1884 * loading
1885 *
1886 * Note that if the caller is in an acquire lock, perform action, release lock
1887 * phase of operation, it is possible that the FW may detect a timeout and issue
1888 * a CORER. In this case, the driver will receive a CORER interrupt and will
1889 * have to determine its cause. The calling thread that is handling this flow
1890 * will likely get an error propagated back to it indicating the Download
1891 * Package, Update Package or the Release Resource AQ commands timed out.
1892 */
1893 static enum ice_status
1894 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1895 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1896 struct ice_sq_cd *cd)
1897 {
1898 struct ice_aqc_req_res *cmd_resp;
1899 struct ice_aq_desc desc;
1900 enum ice_status status;
1901
1902 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1903
1904 cmd_resp = &desc.params.res_owner;
1905
1906 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1907
1908 cmd_resp->res_id = CPU_TO_LE16(res);
1909 cmd_resp->access_type = CPU_TO_LE16(access);
1910 cmd_resp->res_number = CPU_TO_LE32(sdp_number);
1911 cmd_resp->timeout = CPU_TO_LE32(*timeout);
1912 *timeout = 0;
1913
1914 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1915
1916 /* The completion specifies the maximum time in ms that the driver
1917 * may hold the resource in the Timeout field.
1918 */
1919
1920 /* Global config lock response utilizes an additional status field.
1921 *
1922 * If the Global config lock resource is held by some other driver, the
1923 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1924 * and the timeout field indicates the maximum time the current owner
1925 * of the resource has to free it.
1926 */
1927 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1928 if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1929 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1930 return ICE_SUCCESS;
1931 } else if (LE16_TO_CPU(cmd_resp->status) ==
1932 ICE_AQ_RES_GLBL_IN_PROG) {
1933 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1934 return ICE_ERR_AQ_ERROR;
1935 } else if (LE16_TO_CPU(cmd_resp->status) ==
1936 ICE_AQ_RES_GLBL_DONE) {
1937 return ICE_ERR_AQ_NO_WORK;
1938 }
1939
1940 /* invalid FW response, force a timeout immediately */
1941 *timeout = 0;
1942 return ICE_ERR_AQ_ERROR;
1943 }
1944
1945 /* If the resource is held by some other driver, the command completes
1946 * with a busy return value and the timeout field indicates the maximum
1947 * time the current owner of the resource has to free it.
1948 */
1949 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1950 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1951
1952 return status;
1953 }
1954
1955 /**
1956 * ice_aq_release_res
1957 * @hw: pointer to the HW struct
1958 * @res: resource ID
1959 * @sdp_number: resource number
1960 * @cd: pointer to command details structure or NULL
1961 *
1962 * release common resource using the admin queue commands (0x0009)
1963 */
1964 static enum ice_status
1965 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1966 struct ice_sq_cd *cd)
1967 {
1968 struct ice_aqc_req_res *cmd;
1969 struct ice_aq_desc desc;
1970
1971 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1972
1973 cmd = &desc.params.res_owner;
1974
1975 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1976
1977 cmd->res_id = CPU_TO_LE16(res);
1978 cmd->res_number = CPU_TO_LE32(sdp_number);
1979
1980 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1981 }
1982
1983 /**
1984 * ice_acquire_res
1985 * @hw: pointer to the HW structure
1986 * @res: resource ID
1987 * @access: access type (read or write)
1988 * @timeout: timeout in milliseconds
1989 *
1990 * This function will attempt to acquire the ownership of a resource.
1991 */
1992 enum ice_status
1993 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1994 enum ice_aq_res_access_type access, u32 timeout)
1995 {
1996 #define ICE_RES_POLLING_DELAY_MS 10
1997 u32 delay = ICE_RES_POLLING_DELAY_MS;
1998 u32 time_left = timeout;
1999 enum ice_status status;
2000
2001 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2002
2003 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
2004
2005 /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
2006 * previously acquired the resource and performed any necessary updates;
2007 * in this case the caller does not obtain the resource and has no
2008 * further work to do.
2009 */
2010 if (status == ICE_ERR_AQ_NO_WORK)
2011 goto ice_acquire_res_exit;
2012
2013 if (status)
2014 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
2015
2016 	/* If necessary, poll until the current lock owner times out */
2017 timeout = time_left;
2018 while (status && timeout && time_left) {
2019 ice_msec_delay(delay, true);
2020 timeout = (timeout > delay) ? timeout - delay : 0;
2021 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
2022
2023 if (status == ICE_ERR_AQ_NO_WORK)
2024 /* lock free, but no work to do */
2025 break;
2026
2027 if (!status)
2028 /* lock acquired */
2029 break;
2030 }
2031 if (status && status != ICE_ERR_AQ_NO_WORK)
2032 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
2033
2034 ice_acquire_res_exit:
2035 if (status == ICE_ERR_AQ_NO_WORK) {
2036 if (access == ICE_RES_WRITE)
2037 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
2038 else
2039 ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
2040 }
2041 return status;
2042 }
2043
2044 /**
2045 * ice_release_res
2046 * @hw: pointer to the HW structure
2047 * @res: resource ID
2048 *
2049 * This function will release a resource using the proper Admin Command.
2050 */
2051 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
2052 {
2053 enum ice_status status;
2054 u32 total_delay = 0;
2055
2056 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2057
2058 status = ice_aq_release_res(hw, res, 0, NULL);
2059
2060 /* there are some rare cases when trying to release the resource
2061 * results in an admin queue timeout, so handle them correctly
2062 */
2063 while ((status == ICE_ERR_AQ_TIMEOUT) &&
2064 (total_delay < hw->adminq.sq_cmd_timeout)) {
2065 ice_msec_delay(1, true);
2066 status = ice_aq_release_res(hw, res, 0, NULL);
2067 total_delay++;
2068 }
2069 }
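/* Typical acquire/use/release pattern (illustrative sketch; the resource ID,
 * access type and 100 ms timeout are example values a caller would choose):
 *
 *	enum ice_status status;
 *
 *	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID,
 *				 ICE_RES_WRITE, 100);
 *	if (status == ICE_ERR_AQ_NO_WORK) {
 *		// another driver already did the work; nothing to do
 *	} else if (status) {
 *		// failed to get the lock; abort
 *	} else {
 *		// perform the protected operation
 *		ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
 *	}
 */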
2070
2071 /**
2072 * ice_aq_alloc_free_res - command to allocate/free resources
2073 * @hw: pointer to the HW struct
2074 * @num_entries: number of resource entries in buffer
2075 * @buf: Indirect buffer to hold data parameters and response
2076 * @buf_size: size of buffer for indirect commands
2077 * @opc: pass in the command opcode
2078 * @cd: pointer to command details structure or NULL
2079 *
2080 * Helper function to allocate/free resources using the admin queue commands
2081 */
2082 enum ice_status
2083 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
2084 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
2085 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2086 {
2087 struct ice_aqc_alloc_free_res_cmd *cmd;
2088 struct ice_aq_desc desc;
2089
2090 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2091
2092 cmd = &desc.params.sw_res_ctrl;
2093
2094 if (!buf)
2095 return ICE_ERR_PARAM;
2096
2097 if (buf_size < FLEX_ARRAY_SIZE(buf, elem, num_entries))
2098 return ICE_ERR_PARAM;
2099
2100 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2101
2102 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2103
2104 cmd->num_entries = CPU_TO_LE16(num_entries);
2105
2106 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2107 }
2108
2109 /**
2110 * ice_alloc_hw_res - allocate resource
2111 * @hw: pointer to the HW struct
2112 * @type: type of resource
2113 * @num: number of resources to allocate
2114 * @btm: allocate from bottom
2115 * @res: pointer to array that will receive the resources
2116 */
2117 enum ice_status
2118 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
2119 {
2120 struct ice_aqc_alloc_free_res_elem *buf;
2121 enum ice_status status;
2122 u16 buf_len;
2123
2124 buf_len = ice_struct_size(buf, elem, num);
2125 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2126 if (!buf)
2127 return ICE_ERR_NO_MEMORY;
2128
2129 /* Prepare buffer to allocate resource. */
2130 buf->num_elems = CPU_TO_LE16(num);
2131 buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
2132 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
2133 if (btm)
2134 buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
2135
2136 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
2137 ice_aqc_opc_alloc_res, NULL);
2138 if (status)
2139 goto ice_alloc_res_exit;
2140
2141 ice_memcpy(res, buf->elem, sizeof(*buf->elem) * num,
2142 ICE_NONDMA_TO_NONDMA);
2143
2144 ice_alloc_res_exit:
2145 ice_free(hw, buf);
2146 return status;
2147 }
2148
2149 /**
2150 * ice_free_hw_res - free allocated HW resource
2151 * @hw: pointer to the HW struct
2152 * @type: type of resource to free
2153 * @num: number of resources
2154 * @res: pointer to array that contains the resources to free
2155 */
2156 enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
2157 {
2158 struct ice_aqc_alloc_free_res_elem *buf;
2159 enum ice_status status;
2160 u16 buf_len;
2161
2162 buf_len = ice_struct_size(buf, elem, num);
2163 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2164 if (!buf)
2165 return ICE_ERR_NO_MEMORY;
2166
2167 /* Prepare buffer to free resource. */
2168 buf->num_elems = CPU_TO_LE16(num);
2169 buf->res_type = CPU_TO_LE16(type);
2170 ice_memcpy(buf->elem, res, sizeof(*buf->elem) * num,
2171 ICE_NONDMA_TO_NONDMA);
2172
2173 status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
2174 ice_aqc_opc_free_res, NULL);
2175 if (status)
2176 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2177
2178 ice_free(hw, buf);
2179 return status;
2180 }
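/* Usage sketch (illustrative only; res_type and the count of 2 are
 * hypothetical placeholders):
 *
 *	u16 items[2];
 *
 *	if (!ice_alloc_hw_res(hw, res_type, 2, false, items)) {
 *		// use items[0] and items[1]
 *		ice_free_hw_res(hw, res_type, 2, items);
 *	}
 */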
2181
2182 /**
2183 * ice_get_num_per_func - determine number of resources per PF
2184 * @hw: pointer to the HW structure
2185 * @max: value to be evenly split between each PF
2186 *
2187 * Determine the number of valid functions by going through the bitmap returned
2188 * from parsing capabilities and use this to calculate the number of resources
2189 * per PF based on the max value passed in.
2190 */
2191 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
2192 {
2193 u8 funcs;
2194
2195 #define ICE_CAPS_VALID_FUNCS_M 0xFF
2196 funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
2197 ICE_CAPS_VALID_FUNCS_M);
2198
2199 if (!funcs)
2200 return 0;
2201
2202 return max / funcs;
2203 }
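/* Example: with a valid_functions bitmap of 0xFF (8 PFs) and max = 768,
 * each PF is granted 768 / 8 = 96 resources; with only two PFs valid it
 * would be 384 each.
 */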
2204
2205 /**
2206 * ice_print_led_caps - print LED capabilities
2207 * @hw: pointer to the ice_hw instance
2208 * @caps: pointer to common caps instance
2209 * @prefix: string to prefix when printing
2210 * @dbg: set to indicate debug print
2211 */
2212 static void
2213 ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2214 char const *prefix, bool dbg)
2215 {
2216 u8 i;
2217
2218 if (dbg)
2219 ice_debug(hw, ICE_DBG_INIT, "%s: led_pin_num = %d\n", prefix,
2220 caps->led_pin_num);
2221 else
2222 ice_info(hw, "%s: led_pin_num = %d\n", prefix,
2223 caps->led_pin_num);
2224
2225 for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_LED; i++) {
2226 if (!caps->led[i])
2227 continue;
2228
2229 if (dbg)
2230 ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = %d\n",
2231 prefix, i, caps->led[i]);
2232 else
2233 ice_info(hw, "%s: led[%d] = %d\n", prefix, i,
2234 caps->led[i]);
2235 }
2236 }
2237
2238 /**
2239 * ice_print_sdp_caps - print SDP capabilities
2240 * @hw: pointer to the ice_hw instance
2241 * @caps: pointer to common caps instance
2242 * @prefix: string to prefix when printing
2243 * @dbg: set to indicate debug print
2244 */
2245 static void
2246 ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2247 char const *prefix, bool dbg)
2248 {
2249 u8 i;
2250
2251 if (dbg)
2252 ice_debug(hw, ICE_DBG_INIT, "%s: sdp_pin_num = %d\n", prefix,
2253 caps->sdp_pin_num);
2254 else
2255 ice_info(hw, "%s: sdp_pin_num = %d\n", prefix,
2256 caps->sdp_pin_num);
2257
2258 for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_SDP; i++) {
2259 if (!caps->sdp[i])
2260 continue;
2261
2262 if (dbg)
2263 ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = %d\n",
2264 prefix, i, caps->sdp[i]);
2265 else
2266 ice_info(hw, "%s: sdp[%d] = %d\n", prefix,
2267 i, caps->sdp[i]);
2268 }
2269 }
2270
2271 /**
2272 * ice_parse_common_caps - parse common device/function capabilities
2273 * @hw: pointer to the HW struct
2274 * @caps: pointer to common capabilities structure
2275 * @elem: the capability element to parse
2276 * @prefix: message prefix for tracing capabilities
2277 *
2278 * Given a capability element, extract relevant details into the common
2279 * capability structure.
2280 *
2281 * Returns: true if the capability matches one of the common capability ids,
2282 * false otherwise.
2283 */
2284 static bool
2285 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2286 struct ice_aqc_list_caps_elem *elem, const char *prefix)
2287 {
2288 u32 logical_id = LE32_TO_CPU(elem->logical_id);
2289 u32 phys_id = LE32_TO_CPU(elem->phys_id);
2290 u32 number = LE32_TO_CPU(elem->number);
2291 u16 cap = LE16_TO_CPU(elem->cap);
2292 bool found = true;
2293
2294 switch (cap) {
2295 case ICE_AQC_CAPS_SWITCHING_MODE:
2296 caps->switching_mode = number;
2297 ice_debug(hw, ICE_DBG_INIT, "%s: switching_mode = %d\n", prefix,
2298 caps->switching_mode);
2299 break;
2300 case ICE_AQC_CAPS_MANAGEABILITY_MODE:
2301 caps->mgmt_mode = number;
2302 caps->mgmt_protocols_mctp = logical_id;
2303 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_mode = %d\n", prefix,
2304 caps->mgmt_mode);
2305 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_protocols_mctp = %d\n", prefix,
2306 caps->mgmt_protocols_mctp);
2307 break;
2308 case ICE_AQC_CAPS_OS2BMC:
2309 caps->os2bmc = number;
2310 ice_debug(hw, ICE_DBG_INIT, "%s: os2bmc = %d\n", prefix, caps->os2bmc);
2311 break;
2312 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2313 caps->valid_functions = number;
2314 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
2315 caps->valid_functions);
2316 break;
2317 case ICE_AQC_CAPS_SRIOV:
2318 caps->sr_iov_1_1 = (number == 1);
2319 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
2320 caps->sr_iov_1_1);
2321 break;
2322 case ICE_AQC_CAPS_VMDQ:
2323 caps->vmdq = (number == 1);
2324 ice_debug(hw, ICE_DBG_INIT, "%s: vmdq = %d\n", prefix, caps->vmdq);
2325 break;
2326 case ICE_AQC_CAPS_802_1QBG:
2327 caps->evb_802_1_qbg = (number == 1);
2328 ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbg = %d\n", prefix, number);
2329 break;
2330 case ICE_AQC_CAPS_802_1BR:
2331 caps->evb_802_1_qbh = (number == 1);
2332 ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbh = %d\n", prefix, number);
2333 break;
2334 case ICE_AQC_CAPS_DCB:
2335 caps->dcb = (number == 1);
2336 caps->active_tc_bitmap = logical_id;
2337 caps->maxtc = phys_id;
2338 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
2339 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
2340 caps->active_tc_bitmap);
2341 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
2342 break;
2343 case ICE_AQC_CAPS_ISCSI:
2344 caps->iscsi = (number == 1);
2345 ice_debug(hw, ICE_DBG_INIT, "%s: iscsi = %d\n", prefix, caps->iscsi);
2346 break;
2347 case ICE_AQC_CAPS_RSS:
2348 caps->rss_table_size = number;
2349 caps->rss_table_entry_width = logical_id;
2350 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
2351 caps->rss_table_size);
2352 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
2353 caps->rss_table_entry_width);
2354 break;
2355 case ICE_AQC_CAPS_RXQS:
2356 caps->num_rxq = number;
2357 caps->rxq_first_id = phys_id;
2358 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
2359 caps->num_rxq);
2360 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
2361 caps->rxq_first_id);
2362 break;
2363 case ICE_AQC_CAPS_TXQS:
2364 caps->num_txq = number;
2365 caps->txq_first_id = phys_id;
2366 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
2367 caps->num_txq);
2368 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
2369 caps->txq_first_id);
2370 break;
2371 case ICE_AQC_CAPS_MSIX:
2372 caps->num_msix_vectors = number;
2373 caps->msix_vector_first_id = phys_id;
2374 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
2375 caps->num_msix_vectors);
2376 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
2377 caps->msix_vector_first_id);
2378 break;
2379 case ICE_AQC_CAPS_NVM_MGMT:
2380 caps->sec_rev_disabled =
2381 (number & ICE_NVM_MGMT_SEC_REV_DISABLED) ?
2382 true : false;
2383 ice_debug(hw, ICE_DBG_INIT, "%s: sec_rev_disabled = %d\n", prefix,
2384 caps->sec_rev_disabled);
2385 caps->update_disabled =
2386 (number & ICE_NVM_MGMT_UPDATE_DISABLED) ?
2387 true : false;
2388 ice_debug(hw, ICE_DBG_INIT, "%s: update_disabled = %d\n", prefix,
2389 caps->update_disabled);
2390 caps->nvm_unified_update =
2391 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
2392 true : false;
2393 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
2394 caps->nvm_unified_update);
2395 caps->netlist_auth =
2396 (number & ICE_NVM_MGMT_NETLIST_AUTH_SUPPORT) ?
2397 true : false;
2398 ice_debug(hw, ICE_DBG_INIT, "%s: netlist_auth = %d\n", prefix,
2399 caps->netlist_auth);
2400 break;
2401 case ICE_AQC_CAPS_CEM:
2402 caps->mgmt_cem = (number == 1);
2403 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_cem = %d\n", prefix,
2404 caps->mgmt_cem);
2405 break;
2406 case ICE_AQC_CAPS_IWARP:
2407 caps->iwarp = (number == 1);
2408 ice_debug(hw, ICE_DBG_INIT, "%s: iwarp = %d\n", prefix, caps->iwarp);
2409 break;
2410 case ICE_AQC_CAPS_ROCEV2_LAG:
2411 caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG);
2412 ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %d\n",
2413 prefix, caps->roce_lag);
2414 break;
2415 case ICE_AQC_CAPS_LED:
2416 if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) {
2417 caps->led[phys_id] = true;
2418 caps->led_pin_num++;
2419 ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = 1\n", prefix, phys_id);
2420 }
2421 break;
2422 case ICE_AQC_CAPS_SDP:
2423 if (phys_id < ICE_MAX_SUPPORTED_GPIO_SDP) {
2424 caps->sdp[phys_id] = true;
2425 caps->sdp_pin_num++;
2426 ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = 1\n", prefix, phys_id);
2427 }
2428 break;
2429 case ICE_AQC_CAPS_WR_CSR_PROT:
2430 caps->wr_csr_prot = number;
2431 caps->wr_csr_prot |= (u64)logical_id << 32;
2432 ice_debug(hw, ICE_DBG_INIT, "%s: wr_csr_prot = 0x%llX\n", prefix,
2433 (unsigned long long)caps->wr_csr_prot);
2434 break;
2435 case ICE_AQC_CAPS_WOL_PROXY:
2436 caps->num_wol_proxy_fltr = number;
2437 caps->wol_proxy_vsi_seid = logical_id;
2438 caps->apm_wol_support = !!(phys_id & ICE_WOL_SUPPORT_M);
2439 caps->acpi_prog_mthd = !!(phys_id &
2440 ICE_ACPI_PROG_MTHD_M);
2441 caps->proxy_support = !!(phys_id & ICE_PROXY_SUPPORT_M);
2442 ice_debug(hw, ICE_DBG_INIT, "%s: num_wol_proxy_fltr = %d\n", prefix,
2443 caps->num_wol_proxy_fltr);
2444 ice_debug(hw, ICE_DBG_INIT, "%s: wol_proxy_vsi_seid = %d\n", prefix,
2445 caps->wol_proxy_vsi_seid);
2446 ice_debug(hw, ICE_DBG_INIT, "%s: apm_wol_support = %d\n",
2447 prefix, caps->apm_wol_support);
2448 break;
2449 case ICE_AQC_CAPS_MAX_MTU:
2450 caps->max_mtu = number;
2451 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
2452 prefix, caps->max_mtu);
2453 break;
2454 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
2455 caps->pcie_reset_avoidance = (number > 0);
2456 ice_debug(hw, ICE_DBG_INIT,
2457 "%s: pcie_reset_avoidance = %d\n", prefix,
2458 caps->pcie_reset_avoidance);
2459 break;
2460 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
2461 caps->reset_restrict_support = (number == 1);
2462 ice_debug(hw, ICE_DBG_INIT,
2463 "%s: reset_restrict_support = %d\n", prefix,
2464 caps->reset_restrict_support);
2465 break;
2466 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0:
2467 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG1:
2468 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2:
2469 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3:
2470 {
2471 u8 index = (u8)(cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0);
2472
2473 caps->ext_topo_dev_img_ver_high[index] = number;
2474 caps->ext_topo_dev_img_ver_low[index] = logical_id;
2475 caps->ext_topo_dev_img_part_num[index] =
2476 (phys_id & ICE_EXT_TOPO_DEV_IMG_PART_NUM_M) >>
2477 ICE_EXT_TOPO_DEV_IMG_PART_NUM_S;
2478 caps->ext_topo_dev_img_load_en[index] =
2479 (phys_id & ICE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
2480 caps->ext_topo_dev_img_prog_en[index] =
2481 (phys_id & ICE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
2482 caps->ext_topo_dev_img_ver_schema[index] =
2483 (phys_id & ICE_EXT_TOPO_DEV_IMG_VER_SCHEMA) != 0;
2484 ice_debug(hw, ICE_DBG_INIT,
2485 "%s: ext_topo_dev_img_ver_high[%d] = %d\n",
2486 prefix, index,
2487 caps->ext_topo_dev_img_ver_high[index]);
2488 ice_debug(hw, ICE_DBG_INIT,
2489 "%s: ext_topo_dev_img_ver_low[%d] = %d\n",
2490 prefix, index,
2491 caps->ext_topo_dev_img_ver_low[index]);
2492 ice_debug(hw, ICE_DBG_INIT,
2493 "%s: ext_topo_dev_img_part_num[%d] = %d\n",
2494 prefix, index,
2495 caps->ext_topo_dev_img_part_num[index]);
2496 ice_debug(hw, ICE_DBG_INIT,
2497 "%s: ext_topo_dev_img_load_en[%d] = %d\n",
2498 prefix, index,
2499 caps->ext_topo_dev_img_load_en[index]);
2500 ice_debug(hw, ICE_DBG_INIT,
2501 "%s: ext_topo_dev_img_prog_en[%d] = %d\n",
2502 prefix, index,
2503 caps->ext_topo_dev_img_prog_en[index]);
2504 ice_debug(hw, ICE_DBG_INIT,
2505 "%s: ext_topo_dev_img_ver_schema[%d] = %d\n",
2506 prefix, index,
2507 caps->ext_topo_dev_img_ver_schema[index]);
2508 break;
2509 }
2510 case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE:
2511 caps->tx_sched_topo_comp_mode_en = (number == 1);
2512 break;
2513 case ICE_AQC_CAPS_DYN_FLATTENING:
2514 caps->dyn_flattening_en = (number == 1);
2515 ice_debug(hw, ICE_DBG_INIT, "%s: dyn_flattening_en = %d\n",
2516 prefix, caps->dyn_flattening_en);
2517 break;
2518 case ICE_AQC_CAPS_OROM_RECOVERY_UPDATE:
2519 caps->orom_recovery_update = (number == 1);
2520 ice_debug(hw, ICE_DBG_INIT, "%s: orom_recovery_update = %d\n",
2521 prefix, caps->orom_recovery_update);
2522 break;
2523 default:
2524 /* Not one of the recognized common capabilities */
2525 found = false;
2526 }
2527
2528 return found;
2529 }
2530
2531 /**
2532 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
2533 * @hw: pointer to the HW structure
2534 * @caps: pointer to capabilities structure to fix
2535 *
2536 * Re-calculate the capabilities that are dependent on the number of physical
2537 * ports; i.e. some features are not supported or function differently on
2538 * devices with more than 4 ports.
2539 */
2540 static void
2541 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2542 {
2543 /* This assumes device capabilities are always scanned before function
2544 * capabilities during the initialization flow.
2545 */
2546 if (hw->dev_caps.num_funcs > 4) {
2547 /* Max 4 TCs per port */
2548 caps->maxtc = 4;
2549 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
2550 caps->maxtc);
2551 if (caps->iwarp) {
2552 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
2553 caps->iwarp = 0;
2554 }
2555
2556 /* print message only when processing device capabilities
2557 * during initialization.
2558 */
2559 if (caps == &hw->dev_caps.common_cap)
2560 ice_info(hw, "RDMA functionality is not available with the current device configuration.\n");
2561 }
2562 }
2563
2564 /**
2565 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
2566 * @hw: pointer to the HW struct
2567 * @func_p: pointer to function capabilities structure
2568 * @cap: pointer to the capability element to parse
2569 *
2570 * Extract function capabilities for ICE_AQC_CAPS_VF.
2571 */
2572 static void
2573 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2574 struct ice_aqc_list_caps_elem *cap)
2575 {
2576 u32 number = LE32_TO_CPU(cap->number);
2577 u32 logical_id = LE32_TO_CPU(cap->logical_id);
2578
2579 func_p->num_allocd_vfs = number;
2580 func_p->vf_base_id = logical_id;
2581 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
2582 func_p->num_allocd_vfs);
2583 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
2584 func_p->vf_base_id);
2585 }
2586
2587 /**
2588 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2589 * @hw: pointer to the HW struct
2590 * @func_p: pointer to function capabilities structure
2591 * @cap: pointer to the capability element to parse
2592 *
2593 * Extract function capabilities for ICE_AQC_CAPS_VSI.
2594 */
2595 static void
2596 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2597 struct ice_aqc_list_caps_elem *cap)
2598 {
2599 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2600 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2601 LE32_TO_CPU(cap->number));
2602 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2603 func_p->guar_num_vsi);
2604 }
2605
2606 /**
2607 * ice_parse_func_caps - Parse function capabilities
2608 * @hw: pointer to the HW struct
2609 * @func_p: pointer to function capabilities structure
2610 * @buf: buffer containing the function capability records
2611 * @cap_count: the number of capabilities
2612 *
2613 * Helper function to parse function (0x000A) capabilities list. For
2614 * capabilities shared between device and function, this relies on
2615 * ice_parse_common_caps.
2616 *
2617 * Loop through the list of provided capabilities and extract the relevant
2618 * data into the function capabilities structure.
2619 */
2620 static void
2621 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2622 void *buf, u32 cap_count)
2623 {
2624 struct ice_aqc_list_caps_elem *cap_resp;
2625 u32 i;
2626
2627 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2628
2629 ice_memset(func_p, 0, sizeof(*func_p), ICE_NONDMA_MEM);
2630
2631 for (i = 0; i < cap_count; i++) {
2632 u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2633 bool found;
2634
2635 found = ice_parse_common_caps(hw, &func_p->common_cap,
2636 &cap_resp[i], "func caps");
2637
2638 switch (cap) {
2639 case ICE_AQC_CAPS_VF:
2640 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2641 break;
2642 case ICE_AQC_CAPS_VSI:
2643 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2644 break;
2645 default:
2646 /* Don't list common capabilities as unknown */
2647 if (!found)
2648 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2649 i, cap);
2650 break;
2651 }
2652 }
2653
2654 ice_print_led_caps(hw, &func_p->common_cap, "func caps", true);
2655 ice_print_sdp_caps(hw, &func_p->common_cap, "func caps", true);
2656
2657 ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2658 }
2659
2660 /**
2661 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2662 * @hw: pointer to the HW struct
2663 * @dev_p: pointer to device capabilities structure
2664 * @cap: capability element to parse
2665 *
2666 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2667 */
2668 static void
2669 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2670 struct ice_aqc_list_caps_elem *cap)
2671 {
2672 u32 number = LE32_TO_CPU(cap->number);
2673
2674 dev_p->num_funcs = ice_hweight32(number);
2675 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2676 dev_p->num_funcs);
2677
2678 }
2679
2680 /**
2681 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2682 * @hw: pointer to the HW struct
2683 * @dev_p: pointer to device capabilities structure
2684 * @cap: capability element to parse
2685 *
2686 * Parse ICE_AQC_CAPS_VF for device capabilities.
2687 */
2688 static void
2689 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2690 struct ice_aqc_list_caps_elem *cap)
2691 {
2692 u32 number = LE32_TO_CPU(cap->number);
2693
2694 dev_p->num_vfs_exposed = number;
2695 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2696 dev_p->num_vfs_exposed);
2697 }
2698
2699 /**
2700 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2701 * @hw: pointer to the HW struct
2702 * @dev_p: pointer to device capabilities structure
2703 * @cap: capability element to parse
2704 *
2705 * Parse ICE_AQC_CAPS_VSI for device capabilities.
2706 */
2707 static void
2708 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2709 struct ice_aqc_list_caps_elem *cap)
2710 {
2711 u32 number = LE32_TO_CPU(cap->number);
2712
2713 dev_p->num_vsi_allocd_to_host = number;
2714 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2715 dev_p->num_vsi_allocd_to_host);
2716 }
2717
2718 /**
2719 * ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap
2720 * @hw: pointer to the HW struct
2721 * @dev_p: pointer to device capabilities structure
2722 * @cap: capability element to parse
2723 *
2724 * Parse ICE_AQC_CAPS_NAC_TOPOLOGY for device capabilities.
2725 */
2726 static void
2727 ice_parse_nac_topo_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2728 struct ice_aqc_list_caps_elem *cap)
2729 {
2730 dev_p->nac_topo.mode = LE32_TO_CPU(cap->number);
2731 dev_p->nac_topo.id = LE32_TO_CPU(cap->phys_id) & ICE_NAC_TOPO_ID_M;
2732
2733 ice_info(hw, "PF is configured in %s mode with IP instance ID %d\n",
2734 (dev_p->nac_topo.mode == 0) ? "primary" : "secondary",
2735 dev_p->nac_topo.id);
2736
2737 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n",
2738 !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M));
2739 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n",
2740 !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M));
2741 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %d\n",
2742 dev_p->nac_topo.id);
2743 }
2744
2745 /**
2746 * ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap
2747 * @hw: pointer to the HW struct
2748 * @dev_p: pointer to device capabilities structure
2749 * @cap: capability element to parse
2750 *
2751 * Parse ICE_AQC_CAPS_SENSOR_READING for device capability for reading
2752 * enabled sensors.
2753 */
2754 static void
2755 ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2756 struct ice_aqc_list_caps_elem *cap)
2757 {
2758 dev_p->supported_sensors = LE32_TO_CPU(cap->number);
2759
2760 ice_debug(hw, ICE_DBG_INIT,
2761 "dev caps: supported sensors (bitmap) = 0x%x\n",
2762 dev_p->supported_sensors);
2763 }
2764
2765 /**
2766 * ice_parse_dev_caps - Parse device capabilities
2767 * @hw: pointer to the HW struct
2768 * @dev_p: pointer to device capabilities structure
2769 * @buf: buffer containing the device capability records
2770 * @cap_count: the number of capabilities
2771 *
2772 * Helper function to parse the device (0x000B) capabilities list. For
2773 * capabilities shared between device and function, this relies on
2774 * ice_parse_common_caps.
2775 *
2776 * Loop through the list of provided capabilities and extract the relevant
2777 * data into the device capabilities structure.
2778 */
2779 static void
2780 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2781 void *buf, u32 cap_count)
2782 {
2783 struct ice_aqc_list_caps_elem *cap_resp;
2784 u32 i;
2785
2786 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2787
2788 ice_memset(dev_p, 0, sizeof(*dev_p), ICE_NONDMA_MEM);
2789
2790 for (i = 0; i < cap_count; i++) {
2791 u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2792 bool found;
2793
2794 found = ice_parse_common_caps(hw, &dev_p->common_cap,
2795 &cap_resp[i], "dev caps");
2796
2797 switch (cap) {
2798 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2799 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2800 break;
2801 case ICE_AQC_CAPS_VF:
2802 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2803 break;
2804 case ICE_AQC_CAPS_VSI:
2805 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2806 break;
2807 case ICE_AQC_CAPS_NAC_TOPOLOGY:
2808 ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]);
2809 break;
2810 case ICE_AQC_CAPS_SENSOR_READING:
2811 ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]);
2812 break;
2813 default:
2814 /* Don't list common capabilities as unknown */
2815 if (!found)
2816 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2817 i, cap);
2818 break;
2819 }
2820 }
2821
2822 ice_print_led_caps(hw, &dev_p->common_cap, "dev caps", true);
2823 ice_print_sdp_caps(hw, &dev_p->common_cap, "dev caps", true);
2824
2825 ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2826 }
2827
2828 /**
2829 * ice_aq_list_caps - query function/device capabilities
2830 * @hw: pointer to the HW struct
2831 * @buf: a buffer to hold the capabilities
2832 * @buf_size: size of the buffer
2833 * @cap_count: if not NULL, set to the number of capabilities reported
2834 * @opc: capabilities type to discover, device or function
2835 * @cd: pointer to command details structure or NULL
2836 *
2837 * Get the function (0x000A) or device (0x000B) capabilities description from
2838 * firmware and store it in the buffer.
2839 *
2840 * If the cap_count pointer is not NULL, then it is set to the number of
2841 * capabilities firmware will report. Note that if the buffer size is too
2842 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2843 * cap_count will still be updated in this case. It is recommended that the
2844 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2845 * firmware could return) to avoid this.
2846 */
2847 static enum ice_status
2848 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2849 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2850 {
2851 struct ice_aqc_list_caps *cmd;
2852 struct ice_aq_desc desc;
2853 enum ice_status status;
2854
2855 cmd = &desc.params.get_cap;
2856
2857 if (opc != ice_aqc_opc_list_func_caps &&
2858 opc != ice_aqc_opc_list_dev_caps)
2859 return ICE_ERR_PARAM;
2860
2861 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2862 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2863
2864 if (cap_count)
2865 *cap_count = LE32_TO_CPU(cmd->count);
2866
2867 return status;
2868 }
2869
2870 /**
2871 * ice_discover_dev_caps - Read and extract device capabilities
2872 * @hw: pointer to the hardware structure
2873 * @dev_caps: pointer to device capabilities structure
2874 *
2875 * Read the device capabilities and extract them into the dev_caps structure
2876 * for later use.
2877 */
2878 static enum ice_status
2879 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2880 {
2881 enum ice_status status;
2882 u32 cap_count = 0;
2883 void *cbuf;
2884
2885 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2886 if (!cbuf)
2887 return ICE_ERR_NO_MEMORY;
2888
2889 /* Although the driver doesn't know the number of capabilities the
2890 * device will return, we can simply send a 4KB buffer, the maximum
2891 * possible size that firmware can return.
2892 */
2893 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2894
2895 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2896 ice_aqc_opc_list_dev_caps, NULL);
2897 if (!status)
2898 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2899 ice_free(hw, cbuf);
2900
2901 return status;
2902 }
2903
2904 /**
2905 * ice_discover_func_caps - Read and extract function capabilities
2906 * @hw: pointer to the hardware structure
2907 * @func_caps: pointer to function capabilities structure
2908 *
2909 * Read the function capabilities and extract them into the func_caps structure
2910 * for later use.
2911 */
2912 static enum ice_status
2913 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2914 {
2915 enum ice_status status;
2916 u32 cap_count = 0;
2917 void *cbuf;
2918
2919 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2920 if (!cbuf)
2921 return ICE_ERR_NO_MEMORY;
2922
2923 /* Although the driver doesn't know the number of capabilities the
2924 * device will return, we can simply send a 4KB buffer, the maximum
2925 * possible size that firmware can return.
2926 */
2927 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2928
2929 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2930 ice_aqc_opc_list_func_caps, NULL);
2931 if (!status)
2932 ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2933 ice_free(hw, cbuf);
2934
2935 return status;
2936 }
2937
2938 /**
2939 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2940 * @hw: pointer to the hardware structure
2941 */
2942 void ice_set_safe_mode_caps(struct ice_hw *hw)
2943 {
2944 struct ice_hw_func_caps *func_caps = &hw->func_caps;
2945 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2946 struct ice_hw_common_caps cached_caps;
2947 u32 num_funcs;
2948
2949 /* cache some func_caps values that should be restored after memset */
2950 cached_caps = func_caps->common_cap;
2951
2952 /* unset func capabilities */
2953 memset(func_caps, 0, sizeof(*func_caps));
2954
2955 #define ICE_RESTORE_FUNC_CAP(name) \
2956 func_caps->common_cap.name = cached_caps.name
2957
2958 /* restore cached values */
2959 ICE_RESTORE_FUNC_CAP(valid_functions);
2960 ICE_RESTORE_FUNC_CAP(txq_first_id);
2961 ICE_RESTORE_FUNC_CAP(rxq_first_id);
2962 ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2963 ICE_RESTORE_FUNC_CAP(max_mtu);
2964 ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2965
2966 /* one Tx and one Rx queue in safe mode */
2967 func_caps->common_cap.num_rxq = 1;
2968 func_caps->common_cap.num_txq = 1;
2969
2970 /* two MSIX vectors, one for traffic and one for misc causes */
2971 func_caps->common_cap.num_msix_vectors = 2;
2972 func_caps->guar_num_vsi = 1;
2973
2974 /* cache some dev_caps values that should be restored after memset */
2975 cached_caps = dev_caps->common_cap;
2976 num_funcs = dev_caps->num_funcs;
2977
2978 /* unset dev capabilities */
2979 memset(dev_caps, 0, sizeof(*dev_caps));
2980
2981 #define ICE_RESTORE_DEV_CAP(name) \
2982 dev_caps->common_cap.name = cached_caps.name
2983
2984 /* restore cached values */
2985 ICE_RESTORE_DEV_CAP(valid_functions);
2986 ICE_RESTORE_DEV_CAP(txq_first_id);
2987 ICE_RESTORE_DEV_CAP(rxq_first_id);
2988 ICE_RESTORE_DEV_CAP(msix_vector_first_id);
2989 ICE_RESTORE_DEV_CAP(max_mtu);
2990 ICE_RESTORE_DEV_CAP(nvm_unified_update);
2991 dev_caps->num_funcs = num_funcs;
2992
2993 /* one Tx and one Rx queue per function in safe mode */
2994 dev_caps->common_cap.num_rxq = num_funcs;
2995 dev_caps->common_cap.num_txq = num_funcs;
2996
2997 /* two MSIX vectors per function */
2998 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2999 }
3000
3001 /**
3002 * ice_get_caps - get info about the HW
3003 * @hw: pointer to the hardware structure
3004 */
3005 enum ice_status ice_get_caps(struct ice_hw *hw)
3006 {
3007 enum ice_status status;
3008
3009 status = ice_discover_dev_caps(hw, &hw->dev_caps);
3010 if (status)
3011 return status;
3012
3013 return ice_discover_func_caps(hw, &hw->func_caps);
3014 }
3015
3016 /**
3017 * ice_aq_manage_mac_write - manage MAC address write command
3018 * @hw: pointer to the HW struct
3019 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
3020 * @flags: flags to control write behavior
3021 * @cd: pointer to command details structure or NULL
3022 *
3023 * This function is used to write MAC address to the NVM (0x0108).
3024 */
3025 enum ice_status
3026 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
3027 struct ice_sq_cd *cd)
3028 {
3029 struct ice_aqc_manage_mac_write *cmd;
3030 struct ice_aq_desc desc;
3031
3032 cmd = &desc.params.mac_write;
3033 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
3034
3035 cmd->flags = flags;
3036 ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
3037
3038 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3039 }
3040
3041 /**
3042 * ice_aq_clear_pxe_mode
3043 * @hw: pointer to the HW struct
3044 *
3045 * Tell the firmware that the driver is taking over from PXE (0x0110).
3046 */
3047 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
3048 {
3049 struct ice_aq_desc desc;
3050
3051 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
3052 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
3053
3054 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3055 }
3056
3057 /**
3058 * ice_clear_pxe_mode - clear pxe operations mode
3059 * @hw: pointer to the HW struct
3060 *
3061 * Make sure all PXE mode settings are cleared, including things
3062 * like descriptor fetch/write-back mode.
3063 */
3064 void ice_clear_pxe_mode(struct ice_hw *hw)
3065 {
3066 if (ice_check_sq_alive(hw, &hw->adminq))
3067 ice_aq_clear_pxe_mode(hw);
3068 }
3069
3070 /**
3071 * ice_aq_set_port_params - set physical port parameters
3072 * @pi: pointer to the port info struct
3073 * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
3074 * @save_bad_pac: if set packets with errors are forwarded to the bad frames VSI
3075 * @pad_short_pac: if set transmit packets smaller than 60 bytes are padded
3076 * @double_vlan: if set double VLAN is enabled
3077 * @cd: pointer to command details structure or NULL
3078 *
3079 * Set Physical port parameters (0x0203)
3080 */
3081 enum ice_status
3082 ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
3083 bool save_bad_pac, bool pad_short_pac, bool double_vlan,
3084 struct ice_sq_cd *cd)
3085 {
3086 struct ice_aqc_set_port_params *cmd;
3087 struct ice_hw *hw = pi->hw;
3088 struct ice_aq_desc desc;
3089 u16 cmd_flags = 0;
3090
3091 cmd = &desc.params.set_port_params;
3092
3093 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
3094 cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
3095 if (save_bad_pac)
3096 cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS;
3097 if (pad_short_pac)
3098 cmd_flags |= ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS;
3099 if (double_vlan)
3100 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
3101 cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
3102
3103 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3104 }
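/* Usage sketch (illustrative only): enable double VLAN on a port without
 * redirecting bad frames or padding short packets:
 *
 *	ice_aq_set_port_params(pi, 0, false, false, true, NULL);
 */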
3105
3106 /**
3107 * ice_is_100m_speed_supported
3108 * @hw: pointer to the HW struct
3109 *
3110 * returns true if 100M speeds are supported by the device,
3111 * false otherwise.
3112 */
3113 bool ice_is_100m_speed_supported(struct ice_hw *hw)
3114 {
3115 switch (hw->device_id) {
3116 case ICE_DEV_ID_E822C_SGMII:
3117 case ICE_DEV_ID_E822L_SGMII:
3118 case ICE_DEV_ID_E823L_1GBE:
3119 case ICE_DEV_ID_E823C_SGMII:
3120 return true;
3121 default:
3122 return false;
3123 }
3124 }
3125
3126 /**
3127 * ice_get_link_speed_based_on_phy_type - returns link speed
3128 * @phy_type_low: lower part of phy_type
3129 * @phy_type_high: higher part of phy_type
3130 *
3131 * This helper function will convert an entry in PHY type structure
3132 * [phy_type_low, phy_type_high] to its corresponding link speed.
3133 * Note: In the [phy_type_low, phy_type_high] structure, exactly one bit
3134 * should be set, as this function converts a single PHY type to its
3135 * speed.
3136 * If no bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
3137 * If more than one bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
3138 */
3139 static u16
3140 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
3141 {
3142 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3143 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3144
3145 switch (phy_type_low) {
3146 case ICE_PHY_TYPE_LOW_100BASE_TX:
3147 case ICE_PHY_TYPE_LOW_100M_SGMII:
3148 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
3149 break;
3150 case ICE_PHY_TYPE_LOW_1000BASE_T:
3151 case ICE_PHY_TYPE_LOW_1000BASE_SX:
3152 case ICE_PHY_TYPE_LOW_1000BASE_LX:
3153 case ICE_PHY_TYPE_LOW_1000BASE_KX:
3154 case ICE_PHY_TYPE_LOW_1G_SGMII:
3155 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
3156 break;
3157 case ICE_PHY_TYPE_LOW_2500BASE_T:
3158 case ICE_PHY_TYPE_LOW_2500BASE_X:
3159 case ICE_PHY_TYPE_LOW_2500BASE_KX:
3160 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
3161 break;
3162 case ICE_PHY_TYPE_LOW_5GBASE_T:
3163 case ICE_PHY_TYPE_LOW_5GBASE_KR:
3164 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
3165 break;
3166 case ICE_PHY_TYPE_LOW_10GBASE_T:
3167 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
3168 case ICE_PHY_TYPE_LOW_10GBASE_SR:
3169 case ICE_PHY_TYPE_LOW_10GBASE_LR:
3170 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
3171 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
3172 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
3173 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
3174 break;
3175 case ICE_PHY_TYPE_LOW_25GBASE_T:
3176 case ICE_PHY_TYPE_LOW_25GBASE_CR:
3177 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
3178 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
3179 case ICE_PHY_TYPE_LOW_25GBASE_SR:
3180 case ICE_PHY_TYPE_LOW_25GBASE_LR:
3181 case ICE_PHY_TYPE_LOW_25GBASE_KR:
3182 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
3183 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
3184 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
3185 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
3186 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
3187 break;
3188 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
3189 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
3190 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
3191 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
3192 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
3193 case ICE_PHY_TYPE_LOW_40G_XLAUI:
3194 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
3195 break;
3196 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
3197 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
3198 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
3199 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
3200 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
3201 case ICE_PHY_TYPE_LOW_50G_LAUI2:
3202 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
3203 case ICE_PHY_TYPE_LOW_50G_AUI2:
3204 case ICE_PHY_TYPE_LOW_50GBASE_CP:
3205 case ICE_PHY_TYPE_LOW_50GBASE_SR:
3206 case ICE_PHY_TYPE_LOW_50GBASE_FR:
3207 case ICE_PHY_TYPE_LOW_50GBASE_LR:
3208 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
3209 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
3210 case ICE_PHY_TYPE_LOW_50G_AUI1:
3211 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
3212 break;
3213 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
3214 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
3215 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
3216 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
3217 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
3218 case ICE_PHY_TYPE_LOW_100G_CAUI4:
3219 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
3220 case ICE_PHY_TYPE_LOW_100G_AUI4:
3221 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
3222 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
3223 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
3224 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
3225 case ICE_PHY_TYPE_LOW_100GBASE_DR:
3226 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
3227 break;
3228 default:
3229 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3230 break;
3231 }
3232
3233 switch (phy_type_high) {
3234 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
3235 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
3236 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
3237 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
3238 case ICE_PHY_TYPE_HIGH_100G_AUI2:
3239 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
3240 break;
3241 default:
3242 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3243 break;
3244 }
3245
3246 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
3247 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3248 return ICE_AQ_LINK_SPEED_UNKNOWN;
3249 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3250 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
3251 return ICE_AQ_LINK_SPEED_UNKNOWN;
3252 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3253 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3254 return speed_phy_type_low;
3255 else
3256 return speed_phy_type_high;
3257 }
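
/*
 * Usage sketch (illustrative comment only, not driver code). It assumes the
 * ICE_PHY_TYPE_LOW_* constants are single-bit masks, as implied by the
 * BIT_ULL() usage in ice_update_phy_type() below: a single PHY type bit maps
 * to exactly one AQ link speed, while zero bits or more than one bit set
 * yields ICE_AQ_LINK_SPEED_UNKNOWN.
 *
 *	u16 speed;
 *
 *	speed = ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_10GBASE_T,
 *						     0);
 *	-> ICE_AQ_LINK_SPEED_10GB
 *
 *	speed = ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_10GBASE_T |
 *						     ICE_PHY_TYPE_LOW_25GBASE_CR,
 *						     0);
 *	-> ICE_AQ_LINK_SPEED_UNKNOWN (more than one bit set)
 */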
3258
3259 /**
3260 * ice_update_phy_type
3261 * @phy_type_low: pointer to the lower part of phy_type
3262 * @phy_type_high: pointer to the higher part of phy_type
3263 * @link_speeds_bitmap: targeted link speeds bitmap
3264 *
3265 * Note: the format of link_speeds_bitmap follows
3266 * [ice_aqc_get_link_status->link_speed]. The caller may pass in a
3267 * link_speeds_bitmap that includes multiple speeds.
3268 *
3269 * Each bit in the [phy_type_low, phy_type_high] pair represents a
3270 * certain link speed. This helper function turns on bits in
3271 * [phy_type_low, phy_type_high] based on the value of the
3272 * link_speeds_bitmap input parameter.
3273 */
3274 void
3275 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
3276 u16 link_speeds_bitmap)
3277 {
3278 u64 pt_high;
3279 u64 pt_low;
3280 int index;
3281 u16 speed;
3282
3283 /* We first check with low part of phy_type */
3284 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
3285 pt_low = BIT_ULL(index);
3286 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
3287
3288 if (link_speeds_bitmap & speed)
3289 *phy_type_low |= BIT_ULL(index);
3290 }
3291
3292 /* We then check with high part of phy_type */
3293 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
3294 pt_high = BIT_ULL(index);
3295 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
3296
3297 if (link_speeds_bitmap & speed)
3298 *phy_type_high |= BIT_ULL(index);
3299 }
3300 }
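
/*
 * Caller sketch (illustrative comment only): expand a speed request into PHY
 * type masks. Assumes phy_low and phy_high are caller-owned and start at zero.
 *
 *	u64 phy_low = 0, phy_high = 0;
 *
 *	ice_update_phy_type(&phy_low, &phy_high,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 *
 *	phy_low now has every 10G and 25G ICE_PHY_TYPE_LOW_* bit set; phy_high
 *	is untouched because only 100G speeds map to the high word.
 */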
3301
3302 /**
3303 * ice_aq_set_phy_cfg
3304 * @hw: pointer to the HW struct
3305 * @pi: port info structure of the interested logical port
3306 * @cfg: structure with PHY configuration data to be set
3307 * @cd: pointer to command details structure or NULL
3308 *
3309 * Set the various PHY configuration parameters supported on the Port.
3310 * One or more of the Set PHY config parameters may be ignored in an MFP
3311 * mode as the PF may not have the privilege to set some of the PHY Config
3312 * parameters. This status will be indicated by the command response (0x0601).
3313 */
3314 enum ice_status
3315 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
3316 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
3317 {
3318 struct ice_aq_desc desc;
3319 enum ice_status status;
3320
3321 if (!cfg)
3322 return ICE_ERR_PARAM;
3323
3324 /* Ensure that only valid bits of cfg->caps can be turned on. */
3325 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
3326 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
3327 cfg->caps);
3328
3329 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
3330 }
3331
3332 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
3333 desc.params.set_phy.lport_num = pi->lport;
3334 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3335
3336 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
3337 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
3338 (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
3339 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
3340 (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
3341 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
3342 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
3343 cfg->low_power_ctrl_an);
3344 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
3345 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
3346 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
3347 cfg->link_fec_opt);
3348
3349 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
3350
3351 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
3352 status = ICE_SUCCESS;
3353
3354 if (!status)
3355 pi->phy.curr_user_phy_cfg = *cfg;
3356
3357 return status;
3358 }
3359
3360 /**
3361 * ice_update_link_info - update status of the HW network link
3362 * @pi: port info structure of the interested logical port
3363 */
3364 enum ice_status ice_update_link_info(struct ice_port_info *pi)
3365 {
3366 struct ice_link_status *li;
3367 enum ice_status status;
3368
3369 if (!pi)
3370 return ICE_ERR_PARAM;
3371
3372 li = &pi->phy.link_info;
3373
3374 status = ice_aq_get_link_info(pi, true, NULL, NULL);
3375 if (status)
3376 return status;
3377
3378 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
3379 struct ice_aqc_get_phy_caps_data *pcaps;
3380 struct ice_hw *hw;
3381
3382 hw = pi->hw;
3383 pcaps = (struct ice_aqc_get_phy_caps_data *)
3384 ice_malloc(hw, sizeof(*pcaps));
3385 if (!pcaps)
3386 return ICE_ERR_NO_MEMORY;
3387
3388 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
3389 pcaps, NULL);
3390
3391 if (status == ICE_SUCCESS)
3392 ice_memcpy(li->module_type, &pcaps->module_type,
3393 sizeof(li->module_type),
3394 ICE_NONDMA_TO_NONDMA);
3395
3396 ice_free(hw, pcaps);
3397 }
3398
3399 return status;
3400 }
3401
3402 /**
3403 * ice_cache_phy_user_req
3404 * @pi: port information structure
3405 * @cache_data: PHY logging data
3406 * @cache_mode: PHY logging mode
3407 *
3408 * Log the user request on (FC, FEC, SPEED) for later use.
3409 */
3410 static void
3411 ice_cache_phy_user_req(struct ice_port_info *pi,
3412 struct ice_phy_cache_mode_data cache_data,
3413 enum ice_phy_cache_mode cache_mode)
3414 {
3415 if (!pi)
3416 return;
3417
3418 switch (cache_mode) {
3419 case ICE_FC_MODE:
3420 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
3421 break;
3422 case ICE_SPEED_MODE:
3423 pi->phy.curr_user_speed_req =
3424 cache_data.data.curr_user_speed_req;
3425 break;
3426 case ICE_FEC_MODE:
3427 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
3428 break;
3429 default:
3430 break;
3431 }
3432 }
3433
3434 /**
3435 * ice_caps_to_fc_mode
3436 * @caps: PHY capabilities
3437 *
3438 * Convert PHY FC capabilities to ice FC mode
3439 */
3440 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
3441 {
3442 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
3443 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3444 return ICE_FC_FULL;
3445
3446 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
3447 return ICE_FC_TX_PAUSE;
3448
3449 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3450 return ICE_FC_RX_PAUSE;
3451
3452 return ICE_FC_NONE;
3453 }
3454
3455 /**
3456 * ice_caps_to_fec_mode
3457 * @caps: PHY capabilities
3458 * @fec_options: Link FEC options
3459 *
3460 * Convert PHY FEC capabilities to ice FEC mode
3461 */
3462 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
3463 {
3464 if (caps & ICE_AQC_PHY_EN_AUTO_FEC) {
3465 if (fec_options & ICE_AQC_PHY_FEC_DIS)
3466 return ICE_FEC_DIS_AUTO;
3467 else
3468 return ICE_FEC_AUTO;
3469 }
3470
3471 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3472 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3473 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
3474 ICE_AQC_PHY_FEC_25G_KR_REQ))
3475 return ICE_FEC_BASER;
3476
3477 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3478 ICE_AQC_PHY_FEC_25G_RS_544_REQ |
3479 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
3480 return ICE_FEC_RS;
3481
3482 return ICE_FEC_NONE;
3483 }
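
/*
 * Illustrative sketch (comment only): decoding the FC and FEC bytes of a
 * previously retrieved ice_aqc_get_phy_caps_data. "pcaps" is a hypothetical
 * local variable assumed to have been filled by ice_aq_get_phy_caps().
 *
 *	enum ice_fc_mode fc = ice_caps_to_fc_mode(pcaps->caps);
 *	enum ice_fec_mode fec = ice_caps_to_fec_mode(pcaps->caps,
 *						     pcaps->link_fec_options);
 */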
3484
3485 /**
3486 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
3487 * @pi: port information structure
3488 * @cfg: PHY configuration data to set FC mode
3489 * @req_mode: FC mode to configure
3490 */
3491 static enum ice_status
3492 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3493 enum ice_fc_mode req_mode)
3494 {
3495 struct ice_phy_cache_mode_data cache_data;
3496 u8 pause_mask = 0x0;
3497
3498 if (!pi || !cfg)
3499 return ICE_ERR_BAD_PTR;
3500 switch (req_mode) {
3501 case ICE_FC_AUTO:
3502 {
3503 struct ice_aqc_get_phy_caps_data *pcaps;
3504 enum ice_status status;
3505
3506 pcaps = (struct ice_aqc_get_phy_caps_data *)
3507 ice_malloc(pi->hw, sizeof(*pcaps));
3508 if (!pcaps)
3509 return ICE_ERR_NO_MEMORY;
3510 /* Query the value of FC that both the NIC and attached media
3511 * can do.
3512 */
3513 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
3514 pcaps, NULL);
3515 if (status) {
3516 ice_free(pi->hw, pcaps);
3517 return status;
3518 }
3519
3520 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3521 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3522
3523 ice_free(pi->hw, pcaps);
3524 break;
3525 }
3526 case ICE_FC_FULL:
3527 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3528 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3529 break;
3530 case ICE_FC_RX_PAUSE:
3531 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3532 break;
3533 case ICE_FC_TX_PAUSE:
3534 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3535 break;
3536 default:
3537 break;
3538 }
3539
3540 /* clear the old pause settings */
3541 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
3542 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
3543
3544 /* set the new capabilities */
3545 cfg->caps |= pause_mask;
3546
3547 /* Cache user FC request */
3548 cache_data.data.curr_user_fc_req = req_mode;
3549 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
3550
3551 return ICE_SUCCESS;
3552 }
3553
3554 /**
3555 * ice_set_fc
3556 * @pi: port information structure
3557 * @aq_failures: pointer to status code, specific to ice_set_fc routine
3558 * @ena_auto_link_update: enable automatic link update
3559 *
3560 * Set the requested flow control mode.
3561 */
3562 enum ice_status
3563 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
3564 {
3565 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3566 struct ice_aqc_get_phy_caps_data *pcaps;
3567 enum ice_status status;
3568 struct ice_hw *hw;
3569
3570 if (!pi || !aq_failures)
3571 return ICE_ERR_BAD_PTR;
3572
3573 *aq_failures = 0;
3574 hw = pi->hw;
3575
3576 pcaps = (struct ice_aqc_get_phy_caps_data *)
3577 ice_malloc(hw, sizeof(*pcaps));
3578 if (!pcaps)
3579 return ICE_ERR_NO_MEMORY;
3580
3581 /* Get the current PHY config */
3582 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3583 pcaps, NULL);
3584
3585 if (status) {
3586 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3587 goto out;
3588 }
3589
3590 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
3591
3592 /* Configure the set PHY data */
3593 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
3594 if (status) {
3595 if (status != ICE_ERR_BAD_PTR)
3596 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3597
3598 goto out;
3599 }
3600
3601 /* If the capabilities have changed, then set the new config */
3602 if (cfg.caps != pcaps->caps) {
3603 int retry_count, retry_max = 10;
3604
3605 /* Auto restart link so settings take effect */
3606 if (ena_auto_link_update)
3607 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3608
3609 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3610 if (status) {
3611 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3612 goto out;
3613 }
3614
3615 /* Update the link info
3616 * It sometimes takes a really long time for link to
3617 * come back from the atomic reset. Thus, we wait a
3618 * little bit.
3619 */
3620 for (retry_count = 0; retry_count < retry_max; retry_count++) {
3621 status = ice_update_link_info(pi);
3622
3623 if (status == ICE_SUCCESS)
3624 break;
3625
3626 ice_msec_delay(100, true);
3627 }
3628
3629 if (status)
3630 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3631 }
3632
3633 out:
3634 ice_free(hw, pcaps);
3635 return status;
3636 }
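
/*
 * Caller sketch (illustrative comment only): request symmetric flow control
 * and let the link restart so the new pause settings take effect. "pi" is a
 * hypothetical, already initialized port_info pointer.
 *
 *	u8 aq_failures = 0;
 *	enum ice_status status;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_failures, true);
 *	if (status)
 *		check aq_failures for ICE_SET_FC_AQ_FAIL_GET,
 *		ICE_SET_FC_AQ_FAIL_SET or ICE_SET_FC_AQ_FAIL_UPDATE
 */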
3637
3638 /**
3639 * ice_phy_caps_equals_cfg
3640 * @phy_caps: PHY capabilities
3641 * @phy_cfg: PHY configuration
3642 *
3643 * Helper function to determine whether the PHY capabilities match the PHY
3644 * configuration
3645 */
3646 bool
3647 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3648 struct ice_aqc_set_phy_cfg_data *phy_cfg)
3649 {
3650 u8 caps_mask, cfg_mask;
3651
3652 if (!phy_caps || !phy_cfg)
3653 return false;
3654
3655 /* These bits are not common between capabilities and configuration.
3656 * Do not use them to determine equality.
3657 */
3658 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3659 ICE_AQC_PHY_EN_MOD_QUAL);
3660 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3661
3662 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3663 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3664 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3665 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3666 phy_caps->eee_cap != phy_cfg->eee_cap ||
3667 phy_caps->eeer_value != phy_cfg->eeer_value ||
3668 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3669 return false;
3670
3671 return true;
3672 }
3673
3674 /**
3675 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3676 * @pi: port information structure
3677 * @caps: PHY ability structure to copy data from
3678 * @cfg: PHY configuration structure to copy data to
3679 *
3680 * Helper function to copy AQC PHY get ability data to PHY set configuration
3681 * data structure
3682 */
3683 void
3684 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3685 struct ice_aqc_get_phy_caps_data *caps,
3686 struct ice_aqc_set_phy_cfg_data *cfg)
3687 {
3688 if (!pi || !caps || !cfg)
3689 return;
3690
3691 ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
3692 cfg->phy_type_low = caps->phy_type_low;
3693 cfg->phy_type_high = caps->phy_type_high;
3694 cfg->caps = caps->caps;
3695 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3696 cfg->eee_cap = caps->eee_cap;
3697 cfg->eeer_value = caps->eeer_value;
3698 cfg->link_fec_opt = caps->link_fec_options;
3699 cfg->module_compliance_enforcement =
3700 caps->module_compliance_enforcement;
3701 }
3702
3703 /**
3704 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
3705 * @pi: port information structure
3706 * @cfg: PHY configuration data to set FEC mode
3707 * @fec: FEC mode to configure
3708 */
3709 enum ice_status
3710 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3711 enum ice_fec_mode fec)
3712 {
3713 struct ice_aqc_get_phy_caps_data *pcaps;
3714 enum ice_status status = ICE_SUCCESS;
3715 struct ice_hw *hw;
3716
3717 if (!pi || !cfg)
3718 return ICE_ERR_BAD_PTR;
3719
3720 hw = pi->hw;
3721
3722 pcaps = (struct ice_aqc_get_phy_caps_data *)
3723 ice_malloc(hw, sizeof(*pcaps));
3724 if (!pcaps)
3725 return ICE_ERR_NO_MEMORY;
3726
3727 status = ice_aq_get_phy_caps(pi, false,
3728 (ice_fw_supports_report_dflt_cfg(hw) ?
3729 ICE_AQC_REPORT_DFLT_CFG :
3730 ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
3731
3732 if (status)
3733 goto out;
3734
3735 cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
3736 cfg->link_fec_opt = pcaps->link_fec_options;
3737
3738 switch (fec) {
3739 case ICE_FEC_BASER:
3740 /* Clear RS bits, and AND BASE-R ability
3741 * bits and OR request bits.
3742 */
3743 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3744 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3745 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3746 ICE_AQC_PHY_FEC_25G_KR_REQ;
3747 break;
3748 case ICE_FEC_RS:
3749 /* Clear BASE-R bits, and AND RS ability
3750 * bits and OR request bits.
3751 */
3752 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3753 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3754 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3755 break;
3756 case ICE_FEC_NONE:
3757 /* Clear all FEC option bits. */
3758 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3759 break;
3760 case ICE_FEC_DIS_AUTO:
3761 /* Set No FEC and auto FEC */
3762 if (!ice_fw_supports_fec_dis_auto(hw)) {
3763 status = ICE_ERR_NOT_SUPPORTED;
3764 goto out;
3765 }
3766 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_DIS;
3767 /* fall-through */
3768 case ICE_FEC_AUTO:
3769 /* AND auto FEC bit, and all caps bits. */
3770 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3771 cfg->link_fec_opt |= pcaps->link_fec_options;
3772 break;
3773 default:
3774 status = ICE_ERR_PARAM;
3775 break;
3776 }
3777
3778 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw) &&
3779 !ice_fw_supports_report_dflt_cfg(pi->hw)) {
3780 struct ice_link_default_override_tlv tlv;
3781
3782 if (ice_get_link_default_override(&tlv, pi))
3783 goto out;
3784
3785 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3786 (tlv.options & ICE_LINK_OVERRIDE_EN))
3787 cfg->link_fec_opt = tlv.fec_options;
3788 }
3789
3790 out:
3791 ice_free(hw, pcaps);
3792
3793 return status;
3794 }
3795
3796 /**
3797 * ice_get_link_status - get status of the HW network link
3798 * @pi: port information structure
3799 * @link_up: pointer to bool (true/false = linkup/linkdown)
3800 *
3801 * Variable link_up is true if link is up, false if link is down.
3802 * The variable link_up is invalid if the return status is non-zero. As a
3803 * result of this call, link status reporting becomes enabled.
3804 */
3805 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3806 {
3807 struct ice_phy_info *phy_info;
3808 enum ice_status status = ICE_SUCCESS;
3809
3810 if (!pi || !link_up)
3811 return ICE_ERR_PARAM;
3812
3813 phy_info = &pi->phy;
3814
3815 if (phy_info->get_link_info) {
3816 status = ice_update_link_info(pi);
3817
3818 if (status)
3819 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
3820 status);
3821 }
3822
3823 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3824
3825 return status;
3826 }
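
/*
 * Caller sketch (illustrative comment only): poll the cached/updated link
 * state for an already initialized port_info pointer "pi".
 *
 *	bool link_up;
 *
 *	if (ice_get_link_status(pi, &link_up) == ICE_SUCCESS && link_up)
 *		the port reported link up in its last link status
 */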
3827
3828 /**
3829 * ice_aq_set_link_restart_an
3830 * @pi: pointer to the port information structure
3831 * @ena_link: if true: enable link, if false: disable link
3832 * @cd: pointer to command details structure or NULL
3833 *
3834 * Sets up the link and restarts the Auto-Negotiation over the link.
3835 */
3836 enum ice_status
3837 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3838 struct ice_sq_cd *cd)
3839 {
3840 enum ice_status status = ICE_ERR_AQ_ERROR;
3841 struct ice_aqc_restart_an *cmd;
3842 struct ice_aq_desc desc;
3843
3844 cmd = &desc.params.restart_an;
3845
3846 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3847
3848 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3849 cmd->lport_num = pi->lport;
3850 if (ena_link)
3851 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3852 else
3853 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3854
3855 status = ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3856 if (status)
3857 return status;
3858
3859 if (ena_link)
3860 pi->phy.curr_user_phy_cfg.caps |= ICE_AQC_PHY_EN_LINK;
3861 else
3862 pi->phy.curr_user_phy_cfg.caps &= ~ICE_AQC_PHY_EN_LINK;
3863
3864 return ICE_SUCCESS;
3865 }
3866
3867 /**
3868 * ice_aq_set_event_mask
3869 * @hw: pointer to the HW struct
3870 * @port_num: port number of the physical function
3871 * @mask: event mask to be set
3872 * @cd: pointer to command details structure or NULL
3873 *
3874 * Set event mask (0x0613)
3875 */
3876 enum ice_status
3877 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3878 struct ice_sq_cd *cd)
3879 {
3880 struct ice_aqc_set_event_mask *cmd;
3881 struct ice_aq_desc desc;
3882
3883 cmd = &desc.params.set_event_mask;
3884
3885 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3886
3887 cmd->lport_num = port_num;
3888
3889 cmd->event_mask = CPU_TO_LE16(mask);
3890 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3891 }
3892
3893 /**
3894 * ice_aq_set_mac_loopback
3895 * @hw: pointer to the HW struct
3896 * @ena_lpbk: Enable or Disable loopback
3897 * @cd: pointer to command details structure or NULL
3898 *
3899 * Enable/disable loopback on a given port
3900 */
3901 enum ice_status
3902 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3903 {
3904 struct ice_aqc_set_mac_lb *cmd;
3905 struct ice_aq_desc desc;
3906
3907 cmd = &desc.params.set_mac_lb;
3908
3909 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3910 if (ena_lpbk)
3911 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3912
3913 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3914 }
3915
3916 /**
3917 * ice_aq_set_port_id_led
3918 * @pi: pointer to the port information
3919 * @is_orig_mode: is this LED set to original mode (by the net-list)
3920 * @cd: pointer to command details structure or NULL
3921 *
3922 * Set LED value for the given port (0x06e9)
3923 */
3924 enum ice_status
3925 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3926 struct ice_sq_cd *cd)
3927 {
3928 struct ice_aqc_set_port_id_led *cmd;
3929 struct ice_hw *hw = pi->hw;
3930 struct ice_aq_desc desc;
3931
3932 cmd = &desc.params.set_port_id_led;
3933
3934 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3935
3936 if (is_orig_mode)
3937 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3938 else
3939 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3940
3941 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3942 }
3943
3944 /**
3945 * ice_aq_sff_eeprom
3946 * @hw: pointer to the HW struct
3947 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3948 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
3949 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
3950 * @page: QSFP page
3951 * @set_page: set or ignore the page
3952 * @data: pointer to data buffer to be read/written to the I2C device.
3953 * @length: 1-16 for read, 1 for write.
3954 * @write: 0 for read, 1 for write.
3955 * @cd: pointer to command details structure or NULL
3956 *
3957 * Read/Write SFF EEPROM (0x06EE)
3958 */
3959 enum ice_status
3960 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3961 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3962 bool write, struct ice_sq_cd *cd)
3963 {
3964 struct ice_aqc_sff_eeprom *cmd;
3965 struct ice_aq_desc desc;
3966 enum ice_status status;
3967
3968 if (!data || (mem_addr & 0xff00))
3969 return ICE_ERR_PARAM;
3970
3971 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3972 cmd = &desc.params.read_write_sff_param;
3973 desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD);
3974 cmd->lport_num = (u8)(lport & 0xff);
3975 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3976 cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
3977 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3978 ((set_page <<
3979 ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3980 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3981 cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
3982 cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3983 if (write)
3984 cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
3985
3986 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
3987 return status;
3988 }
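
/*
 * Caller sketch (illustrative comment only): read the first 16 bytes of the
 * module EEPROM at I2C address 0xA0 (the typical address noted above). The
 * lport, mem_addr, page and set_page values of 0 below are placeholders, not
 * required values.
 *
 *	u8 data[16];
 *	enum ice_status status;
 *
 *	status = ice_aq_sff_eeprom(hw, 0, 0xA0, 0, 0, 0, data,
 *				   sizeof(data), false, NULL);
 */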
3989
3990 /**
3991 * ice_aq_prog_topo_dev_nvm
3992 * @hw: pointer to the hardware structure
3993 * @topo_params: pointer to structure storing topology parameters for a device
3994 * @cd: pointer to command details structure or NULL
3995 *
3996 * Program Topology Device NVM (0x06F2)
3997 *
3998 */
3999 enum ice_status
4000 ice_aq_prog_topo_dev_nvm(struct ice_hw *hw,
4001 struct ice_aqc_link_topo_params *topo_params,
4002 struct ice_sq_cd *cd)
4003 {
4004 struct ice_aqc_prog_topo_dev_nvm *cmd;
4005 struct ice_aq_desc desc;
4006
4007 cmd = &desc.params.prog_topo_dev_nvm;
4008
4009 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_prog_topo_dev_nvm);
4010
4011 ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
4012 ICE_NONDMA_TO_NONDMA);
4013
4014 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4015 }
4016
4017 /**
4018 * ice_aq_read_topo_dev_nvm
4019 * @hw: pointer to the hardware structure
4020 * @topo_params: pointer to structure storing topology parameters for a device
4021 * @start_address: byte offset in the topology device NVM
4022 * @data: pointer to data buffer
4023 * @data_size: number of bytes to be read from the topology device NVM
4024 * @cd: pointer to command details structure or NULL
4025 *
4026 * Read Topology Device NVM (0x06F3)
4027 */
4028 enum ice_status
4029 ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
4030 struct ice_aqc_link_topo_params *topo_params,
4031 u32 start_address, u8 *data, u8 data_size,
4032 struct ice_sq_cd *cd)
4033 {
4034 struct ice_aqc_read_topo_dev_nvm *cmd;
4035 struct ice_aq_desc desc;
4036 enum ice_status status;
4037
4038 if (!data || data_size == 0 ||
4039 data_size > ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE)
4040 return ICE_ERR_PARAM;
4041
4042 cmd = &desc.params.read_topo_dev_nvm;
4043
4044 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_topo_dev_nvm);
4045
4046 desc.datalen = CPU_TO_LE16(data_size);
4047 ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
4048 ICE_NONDMA_TO_NONDMA);
4049 cmd->start_address = CPU_TO_LE32(start_address);
4050
4051 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4052 if (status)
4053 return status;
4054
4055 ice_memcpy(data, cmd->data_read, data_size, ICE_NONDMA_TO_NONDMA);
4056
4057 return ICE_SUCCESS;
4058 }
4059
4060 static u16 ice_lut_type_to_size(u16 lut_type)
4061 {
4062 switch (lut_type) {
4063 case ICE_LUT_VSI:
4064 return ICE_LUT_VSI_SIZE;
4065 case ICE_LUT_GLOBAL:
4066 return ICE_LUT_GLOBAL_SIZE;
4067 case ICE_LUT_PF:
4068 return ICE_LUT_PF_SIZE;
4069 case ICE_LUT_PF_SMALL:
4070 return ICE_LUT_PF_SMALL_SIZE;
4071 default:
4072 return 0;
4073 }
4074 }
4075
4076 static u16 ice_lut_size_to_flag(u16 lut_size)
4077 {
4078 u16 f = 0;
4079
4080 switch (lut_size) {
4081 case ICE_LUT_GLOBAL_SIZE:
4082 f = ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG;
4083 break;
4084 case ICE_LUT_PF_SIZE:
4085 f = ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG;
4086 break;
4087 default:
4088 break;
4089 }
4090 return f << ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S;
4091 }
4092
4093 int ice_lut_size_to_type(int lut_size)
4094 {
4095 switch (lut_size) {
4096 case ICE_LUT_VSI_SIZE:
4097 return ICE_LUT_VSI;
4098 case ICE_LUT_GLOBAL_SIZE:
4099 return ICE_LUT_GLOBAL;
4100 case ICE_LUT_PF_SIZE:
4101 return ICE_LUT_PF;
4102 case ICE_LUT_PF_SMALL_SIZE:
4103 return ICE_LUT_PF_SMALL;
4104 default:
4105 return -1;
4106 }
4107 }
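
/*
 * Note: ice_lut_type_to_size() and ice_lut_size_to_type() are inverses over
 * the valid LUT sizes, e.g. ice_lut_type_to_size(ICE_LUT_PF) returns
 * ICE_LUT_PF_SIZE and ice_lut_size_to_type(ICE_LUT_PF_SIZE) returns
 * ICE_LUT_PF; unknown types map to 0 and unknown sizes map to -1.
 */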
4108
4109 /**
4110 * __ice_aq_get_set_rss_lut
4111 * @hw: pointer to the hardware structure
4112 * @params: RSS LUT parameters
4113 * @set: set true to set the table, false to get the table
4114 *
4115 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
4116 */
4117 static enum ice_status
4118 __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
4119 {
4120 u16 flags, vsi_id, lut_type, lut_size, glob_lut_idx = 0, vsi_handle;
4121 struct ice_aqc_get_set_rss_lut *cmd_resp;
4122 struct ice_aq_desc desc;
4123 enum ice_status status;
4124 u8 *lut;
4125
4126 if (!params)
4127 return ICE_ERR_PARAM;
4128
4129 vsi_handle = params->vsi_handle;
4130 lut = params->lut;
4131 lut_size = ice_lut_type_to_size(params->lut_type);
4132 lut_type = params->lut_type & ICE_LUT_TYPE_MASK;
4133 cmd_resp = &desc.params.get_set_rss_lut;
4134 if (lut_type == ICE_LUT_GLOBAL)
4135 glob_lut_idx = params->global_lut_id;
4136
4137 if (!lut || !lut_size || !ice_is_vsi_valid(hw, vsi_handle))
4138 return ICE_ERR_PARAM;
4139
4140 if (lut_size > params->lut_size)
4141 return ICE_ERR_INVAL_SIZE;
4142
4143 if (set && lut_size != params->lut_size)
4144 return ICE_ERR_PARAM;
4145
4146 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4147
4148 if (set) {
4149 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
4150 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4151 } else {
4152 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
4153 }
4154
4155 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
4156 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
4157 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
4158 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
4159
4160 flags = ice_lut_size_to_flag(lut_size) |
4161 ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
4162 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M) |
4163 ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
4164 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
4165
4166 cmd_resp->flags = CPU_TO_LE16(flags);
4167 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
4168 params->lut_size = LE16_TO_CPU(desc.datalen);
4169 return status;
4170 }
4171
4172 /**
4173 * ice_aq_get_rss_lut
4174 * @hw: pointer to the hardware structure
4175 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
4176 *
4177 * get the RSS lookup table, PF or VSI type
4178 */
4179 enum ice_status
4180 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
4181 {
4182 return __ice_aq_get_set_rss_lut(hw, get_params, false);
4183 }
4184
4185 /**
4186 * ice_aq_set_rss_lut
4187 * @hw: pointer to the hardware structure
4188 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
4189 *
4190 * set the RSS lookup table, PF or VSI type
4191 */
4192 enum ice_status
4193 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
4194 {
4195 return __ice_aq_get_set_rss_lut(hw, set_params, true);
4196 }
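
/*
 * Caller sketch (illustrative comment only): fetch the PF LUT for a VSI.
 * "hw", "vsi_handle" and the "lut" buffer (at least ICE_LUT_PF_SIZE bytes)
 * are assumed to be valid and caller-owned.
 *
 *	struct ice_aq_get_set_rss_lut_params params = { 0 };
 *	enum ice_status status;
 *
 *	params.vsi_handle = vsi_handle;
 *	params.lut_type = ICE_LUT_PF;
 *	params.lut = lut;
 *	params.lut_size = ICE_LUT_PF_SIZE;
 *	status = ice_aq_get_rss_lut(hw, &params);
 */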
4197
4198 /**
4199 * __ice_aq_get_set_rss_key
4200 * @hw: pointer to the HW struct
4201 * @vsi_id: VSI FW index
4202 * @key: pointer to key info struct
4203 * @set: set true to set the key, false to get the key
4204 *
4205 * get (0x0B04) or set (0x0B02) the RSS key per VSI
4206 */
4207 static enum
4208 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
4209 struct ice_aqc_get_set_rss_keys *key,
4210 bool set)
4211 {
4212 struct ice_aqc_get_set_rss_key *cmd_resp;
4213 u16 key_size = sizeof(*key);
4214 struct ice_aq_desc desc;
4215
4216 cmd_resp = &desc.params.get_set_rss_key;
4217
4218 if (set) {
4219 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
4220 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4221 } else {
4222 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
4223 }
4224
4225 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
4226 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
4227 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
4228 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
4229
4230 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
4231 }
4232
4233 /**
4234 * ice_aq_get_rss_key
4235 * @hw: pointer to the HW struct
4236 * @vsi_handle: software VSI handle
4237 * @key: pointer to key info struct
4238 *
4239 * get the RSS key per VSI
4240 */
4241 enum ice_status
4242 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
4243 struct ice_aqc_get_set_rss_keys *key)
4244 {
4245 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
4246 return ICE_ERR_PARAM;
4247
4248 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
4249 key, false);
4250 }
4251
4252 /**
4253 * ice_aq_set_rss_key
4254 * @hw: pointer to the HW struct
4255 * @vsi_handle: software VSI handle
4256 * @keys: pointer to key info struct
4257 *
4258 * set the RSS key per VSI
4259 */
4260 enum ice_status
4261 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
4262 struct ice_aqc_get_set_rss_keys *keys)
4263 {
4264 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
4265 return ICE_ERR_PARAM;
4266
4267 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
4268 keys, true);
4269 }
4270
4271 /**
4272 * ice_aq_add_lan_txq
4273 * @hw: pointer to the hardware structure
4274 * @num_qgrps: Number of added queue groups
4275 * @qg_list: list of queue groups to be added
4276 * @buf_size: size of buffer for indirect command
4277 * @cd: pointer to command details structure or NULL
4278 *
4279 * Add Tx LAN queue (0x0C30)
4280 *
4281 * NOTE:
4282 * Prior to calling add Tx LAN queue:
4283 * Initialize the following as part of the Tx queue context:
4284 * Completion queue ID if the queue uses Completion queue, Quanta profile,
4285 * Cache profile and Packet shaper profile.
4286 *
4287 * After add Tx LAN queue AQ command is completed:
4288 * Interrupts should be associated with specific queues,
4289 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
4290 * flow.
4291 */
4292 enum ice_status
4293 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
4294 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
4295 struct ice_sq_cd *cd)
4296 {
4297 struct ice_aqc_add_tx_qgrp *list;
4298 struct ice_aqc_add_txqs *cmd;
4299 struct ice_aq_desc desc;
4300 u16 i, sum_size = 0;
4301
4302 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4303
4304 cmd = &desc.params.add_txqs;
4305
4306 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
4307
4308 if (!qg_list)
4309 return ICE_ERR_PARAM;
4310
4311 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
4312 return ICE_ERR_PARAM;
4313
4314 for (i = 0, list = qg_list; i < num_qgrps; i++) {
4315 sum_size += ice_struct_size(list, txqs, list->num_txqs);
4316 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
4317 list->num_txqs);
4318 }
4319
4320 if (buf_size != sum_size)
4321 return ICE_ERR_PARAM;
4322
4323 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4324
4325 cmd->num_qgrps = num_qgrps;
4326
4327 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
4328 }
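
/*
 * Buffer sizing note (illustrative): qg_list is a packed sequence of
 * variable-length groups, so for a single group carrying N Tx queues the
 * indirect buffer must be exactly ice_struct_size(qg_list, txqs, N) bytes;
 * any mismatch between buf_size and the summed group sizes fails above with
 * ICE_ERR_PARAM.
 */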
4329
4330 /**
4331 * ice_aq_dis_lan_txq
4332 * @hw: pointer to the hardware structure
4333 * @num_qgrps: number of groups in the list
4334 * @qg_list: the list of groups to disable
4335 * @buf_size: the total size of the qg_list buffer in bytes
4336 * @rst_src: if called due to reset, specifies the reset source
4337 * @vmvf_num: the relative VM or VF number that is undergoing the reset
4338 * @cd: pointer to command details structure or NULL
4339 *
4340 * Disable LAN Tx queue (0x0C31)
4341 */
4342 static enum ice_status
4343 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
4344 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
4345 enum ice_disq_rst_src rst_src, u16 vmvf_num,
4346 struct ice_sq_cd *cd)
4347 {
4348 struct ice_aqc_dis_txq_item *item;
4349 struct ice_aqc_dis_txqs *cmd;
4350 struct ice_aq_desc desc;
4351 enum ice_status status;
4352 u16 i, sz = 0;
4353
4354 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4355 cmd = &desc.params.dis_txqs;
4356 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
4357
4358 /* qg_list can be NULL only in VM/VF reset flow */
4359 if (!qg_list && !rst_src)
4360 return ICE_ERR_PARAM;
4361
4362 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
4363 return ICE_ERR_PARAM;
4364
4365 cmd->num_entries = num_qgrps;
4366
4367 cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
4368 ICE_AQC_Q_DIS_TIMEOUT_M);
4369
4370 switch (rst_src) {
4371 case ICE_VM_RESET:
4372 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
4373 cmd->vmvf_and_timeout |=
4374 CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
4375 break;
4376 case ICE_VF_RESET:
4377 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
4378 /* In this case, FW expects vmvf_num to be absolute VF ID */
4379 cmd->vmvf_and_timeout |=
4380 CPU_TO_LE16((vmvf_num + hw->func_caps.vf_base_id) &
4381 ICE_AQC_Q_DIS_VMVF_NUM_M);
4382 break;
4383 case ICE_NO_RESET:
4384 default:
4385 break;
4386 }
4387
4388 /* flush pipe on time out */
4389 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
4390 /* If no queue group info, we are in a reset flow. Issue the AQ */
4391 if (!qg_list)
4392 goto do_aq;
4393
4394 /* set RD bit to indicate that command buffer is provided by the driver
4395 * and it needs to be read by the firmware
4396 */
4397 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4398
4399 for (i = 0, item = qg_list; i < num_qgrps; i++) {
4400 u16 item_size = ice_struct_size(item, q_id, item->num_qs);
4401
4402 /* If the num of queues is even, add 2 bytes of padding */
4403 if ((item->num_qs % 2) == 0)
4404 item_size += 2;
4405
4406 sz += item_size;
4407
4408 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
4409 }
4410
4411 if (buf_size != sz)
4412 return ICE_ERR_PARAM;
4413
4414 do_aq:
4415 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
4416 if (status) {
4417 if (!qg_list)
4418 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
4419 vmvf_num, hw->adminq.sq_last_status);
4420 else
4421 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
4422 LE16_TO_CPU(qg_list[0].q_id[0]),
4423 hw->adminq.sq_last_status);
4424 }
4425 return status;
4426 }
4427
4428 /**
4429 * ice_aq_move_recfg_lan_txq
4430 * @hw: pointer to the hardware structure
4431 * @num_qs: number of queues to move/reconfigure
4432 * @is_move: true if this operation involves node movement
4433 * @is_tc_change: true if this operation involves a TC change
4434 * @subseq_call: true if this operation is a subsequent call
4435 * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
4436 * @timeout: timeout in units of 100 usec (valid values 0-50)
4437 * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
4438 * @buf: struct containing src/dest TEID and per-queue info
4439 * @buf_size: size of buffer for indirect command
4440 * @txqs_moved: out param, number of queues successfully moved
4441 * @cd: pointer to command details structure or NULL
4442 *
4443 * Move / Reconfigure Tx LAN queues (0x0C32)
4444 */
4445 enum ice_status
4446 ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
4447 bool is_tc_change, bool subseq_call, bool flush_pipe,
4448 u8 timeout, u32 *blocked_cgds,
4449 struct ice_aqc_move_txqs_data *buf, u16 buf_size,
4450 u8 *txqs_moved, struct ice_sq_cd *cd)
4451 {
4452 struct ice_aqc_move_txqs *cmd;
4453 struct ice_aq_desc desc;
4454 enum ice_status status;
4455
4456 cmd = &desc.params.move_txqs;
4457 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
4458
4459 #define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
4460 if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
4461 return ICE_ERR_PARAM;
4462
4463 if (is_tc_change && !flush_pipe && !blocked_cgds)
4464 return ICE_ERR_PARAM;
4465
4466 if (!is_move && !is_tc_change)
4467 return ICE_ERR_PARAM;
4468
4469 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4470
4471 if (is_move)
4472 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;
4473
4474 if (is_tc_change)
4475 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;
4476
4477 if (subseq_call)
4478 cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;
4479
4480 if (flush_pipe)
4481 cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;
4482
4483 cmd->num_qs = num_qs;
4484 cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
4485 ICE_AQC_Q_CMD_TIMEOUT_M);
4486
4487 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4488
4489 if (!status && txqs_moved)
4490 *txqs_moved = cmd->num_qs;
4491
4492 if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
4493 is_tc_change && !flush_pipe)
4494 *blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);
4495
4496 return status;
4497 }
4498
4499 /**
4500 * ice_aq_add_rdma_qsets
4501 * @hw: pointer to the hardware structure
4502 * @num_qset_grps: Number of RDMA Qset groups
4503 * @qset_list: list of qset groups to be added
4504 * @buf_size: size of buffer for indirect command
4505 * @cd: pointer to command details structure or NULL
4506 *
4507 * Add Tx RDMA Qsets (0x0C33)
4508 */
4509 enum ice_status
4510 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
4511 struct ice_aqc_add_rdma_qset_data *qset_list,
4512 u16 buf_size, struct ice_sq_cd *cd)
4513 {
4514 struct ice_aqc_add_rdma_qset_data *list;
4515 struct ice_aqc_add_rdma_qset *cmd;
4516 struct ice_aq_desc desc;
4517 u16 i, sum_size = 0;
4518
4519 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4520
4521 cmd = &desc.params.add_rdma_qset;
4522
4523 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);
4524
4525 if (!qset_list)
4526 return ICE_ERR_PARAM;
4527
4528 if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
4529 return ICE_ERR_PARAM;
4530
4531 for (i = 0, list = qset_list; i < num_qset_grps; i++) {
4532 u16 num_qsets = LE16_TO_CPU(list->num_qsets);
4533
4534 sum_size += ice_struct_size(list, rdma_qsets, num_qsets);
4535 list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
4536 num_qsets);
4537 }
4538
4539 if (buf_size != sum_size)
4540 return ICE_ERR_PARAM;
4541
4542 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4543
4544 cmd->num_qset_grps = num_qset_grps;
4545
4546 return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
4547 }
4548
4549 /* End of FW Admin Queue command wrappers */
4550
4551 /**
4552 * ice_write_byte - write a byte to a packed context structure
4553 * @src_ctx: the context structure to read from
4554 * @dest_ctx: the context to be written to
4555 * @ce_info: a description of the struct to be filled
4556 */
4557 static void
4558 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4559 {
4560 u8 src_byte, dest_byte, mask;
4561 u8 *from, *dest;
4562 u16 shift_width;
4563
4564 /* copy from the next struct field */
4565 from = src_ctx + ce_info->offset;
4566
4567 /* prepare the bits and mask */
4568 shift_width = ce_info->lsb % 8;
4569 mask = (u8)(BIT(ce_info->width) - 1);
4570
4571 src_byte = *from;
4572 src_byte &= mask;
4573
4574 /* shift to correct alignment */
4575 mask <<= shift_width;
4576 src_byte <<= shift_width;
4577
4578 /* get the current bits from the target bit string */
4579 dest = dest_ctx + (ce_info->lsb / 8);
4580
4581 ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
4582
4583 dest_byte &= ~mask; /* get the bits not changing */
4584 dest_byte |= src_byte; /* add in the new bits */
4585
4586 /* put it all back */
4587 ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
4588 }
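
/*
 * Worked example (illustrative): for a descriptor with lsb = 3 and width = 2,
 * a source value of 0x3 is masked to two bits, shifted left by three, and
 * merged into byte (lsb / 8) of the packed context without disturbing the
 * remaining bits of that byte.
 */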
4589
4590 /**
4591 * ice_write_word - write a word to a packed context structure
4592 * @src_ctx: the context structure to read from
4593 * @dest_ctx: the context to be written to
4594 * @ce_info: a description of the struct to be filled
4595 */
4596 static void
4597 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4598 {
4599 u16 src_word, mask;
4600 __le16 dest_word;
4601 u8 *from, *dest;
4602 u16 shift_width;
4603
4604 /* copy from the next struct field */
4605 from = src_ctx + ce_info->offset;
4606
4607 /* prepare the bits and mask */
4608 shift_width = ce_info->lsb % 8;
4609 mask = BIT(ce_info->width) - 1;
4610
4611 /* don't swizzle the bits until after the mask because the mask bits
4612 * will be in a different bit position on big endian machines
4613 */
4614 src_word = *(u16 *)from;
4615 src_word &= mask;
4616
4617 /* shift to correct alignment */
4618 mask <<= shift_width;
4619 src_word <<= shift_width;
4620
4621 /* get the current bits from the target bit string */
4622 dest = dest_ctx + (ce_info->lsb / 8);
4623
4624 ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
4625
4626 dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
4627 dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
4628
4629 /* put it all back */
4630 ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
4631 }
4632
4633 /**
4634 * ice_write_dword - write a dword to a packed context structure
4635 * @src_ctx: the context structure to read from
4636 * @dest_ctx: the context to be written to
4637 * @ce_info: a description of the struct to be filled
4638 */
4639 static void
4640 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4641 {
4642 u32 src_dword, mask;
4643 __le32 dest_dword;
4644 u8 *from, *dest;
4645 u16 shift_width;
4646
4647 /* copy from the next struct field */
4648 from = src_ctx + ce_info->offset;
4649
4650 /* prepare the bits and mask */
4651 shift_width = ce_info->lsb % 8;
4652
4653 /* if the field width is exactly 32 on an x86 machine, then the shift
4654 * operation will not work because the SHL instruction's count is masked
4655 * to 5 bits so the shift will do nothing
4656 */
4657 if (ce_info->width < 32)
4658 mask = BIT(ce_info->width) - 1;
4659 else
4660 mask = (u32)~0;
4661
4662 /* don't swizzle the bits until after the mask because the mask bits
4663 * will be in a different bit position on big endian machines
4664 */
4665 src_dword = *(u32 *)from;
4666 src_dword &= mask;
4667
4668 /* shift to correct alignment */
4669 mask <<= shift_width;
4670 src_dword <<= shift_width;
4671
4672 /* get the current bits from the target bit string */
4673 dest = dest_ctx + (ce_info->lsb / 8);
4674
4675 ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
4676
4677 dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
4678 dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
4679
4680 /* put it all back */
4681 ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
4682 }
4683
4684 /**
4685 * ice_write_qword - write a qword to a packed context structure
4686 * @src_ctx: the context structure to read from
4687 * @dest_ctx: the context to be written to
4688 * @ce_info: a description of the struct to be filled
4689 */
4690 static void
4691 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4692 {
4693 u64 src_qword, mask;
4694 __le64 dest_qword;
4695 u8 *from, *dest;
4696 u16 shift_width;
4697
4698 /* copy from the next struct field */
4699 from = src_ctx + ce_info->offset;
4700
4701 /* prepare the bits and mask */
4702 shift_width = ce_info->lsb % 8;
4703
4704 /* if the field width is exactly 64 on an x86 machine, then the shift
4705 * operation will not work because the SHL instruction's count is masked
4706 * to 6 bits so the shift will do nothing
4707 */
4708 if (ce_info->width < 64)
4709 mask = BIT_ULL(ce_info->width) - 1;
4710 else
4711 mask = (u64)~0;
4712
4713 /* don't swizzle the bits until after the mask because the mask bits
4714 * will be in a different bit position on big endian machines
4715 */
4716 src_qword = *(u64 *)from;
4717 src_qword &= mask;
4718
4719 /* shift to correct alignment */
4720 mask <<= shift_width;
4721 src_qword <<= shift_width;
4722
4723 /* get the current bits from the target bit string */
4724 dest = dest_ctx + (ce_info->lsb / 8);
4725
4726 ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
4727
4728 dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
4729 dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
4730
4731 /* put it all back */
4732 ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
4733 }
4734
4735 /**
4736 * ice_set_ctx - set context bits in packed structure
4737 * @hw: pointer to the hardware structure
4738 * @src_ctx: pointer to a generic non-packed context structure
4739 * @dest_ctx: pointer to memory for the packed structure
4740 * @ce_info: a description of the structure to be transformed
4741 */
4742 enum ice_status
4743 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
4744 const struct ice_ctx_ele *ce_info)
4745 {
4746 int f;
4747
4748 for (f = 0; ce_info[f].width; f++) {
4749 /* We have to deal with each element of the FW response
4750 * using the correct size so that we are correct regardless
4751 * of the endianness of the machine.
4752 */
4753 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
4754 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
4755 f, ce_info[f].width, ce_info[f].size_of);
4756 continue;
4757 }
4758 switch (ce_info[f].size_of) {
4759 case sizeof(u8):
4760 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
4761 break;
4762 case sizeof(u16):
4763 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
4764 break;
4765 case sizeof(u32):
4766 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
4767 break;
4768 case sizeof(u64):
4769 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
4770 break;
4771 default:
4772 return ICE_ERR_INVAL_SIZE;
4773 }
4774 }
4775
4776 return ICE_SUCCESS;
4777 }
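
/*
 * Illustrative sketch (hypothetical, comment only): ce_info is a
 * zero-width-terminated array of field descriptors. A table for a made-up
 * two-field context "struct foo_ctx" (fields "head" and "ena" are invented
 * for this example) could look like this, using the ice_ctx_ele members
 * referenced above (offset, size_of, width, lsb):
 *
 *	static const struct ice_ctx_ele foo_ctx_info[] = {
 *		{ .offset = offsetof(struct foo_ctx, head),
 *		  .size_of = sizeof(u16), .width = 13, .lsb = 0 },
 *		{ .offset = offsetof(struct foo_ctx, ena),
 *		  .size_of = sizeof(u8), .width = 1, .lsb = 13 },
 *		{ 0 }
 *	};
 *
 * The terminating { 0 } entry has width == 0, which ends the field walk in
 * both ice_set_ctx() and ice_get_ctx().
 */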
4778
4779 /**
4780 * ice_aq_get_internal_data
4781 * @hw: pointer to the hardware structure
4782 * @cluster_id: specific cluster to dump
4783 * @table_id: table ID within cluster
4784 * @start: index of line in the block to read
4785 * @buf: dump buffer
4786 * @buf_size: dump buffer size
4787 * @ret_buf_size: return buffer size (returned by FW)
4788 * @ret_next_cluster: next cluster to read (returned by FW)
4789 * @ret_next_table: next block to read (returned by FW)
4790 * @ret_next_index: next index to read (returned by FW)
4791 * @cd: pointer to command details structure
4792 *
4793 * Get internal FW/HW data (0xFF08) for debug purposes.
4794 */
4795 enum ice_status
4796 ice_aq_get_internal_data(struct ice_hw *hw, u16 cluster_id, u16 table_id,
4797 u32 start, void *buf, u16 buf_size, u16 *ret_buf_size,
4798 u16 *ret_next_cluster, u16 *ret_next_table,
4799 u32 *ret_next_index, struct ice_sq_cd *cd)
4800 {
4801 struct ice_aqc_debug_dump_internals *cmd;
4802 struct ice_aq_desc desc;
4803 enum ice_status status;
4804
4805 cmd = &desc.params.debug_dump;
4806
4807 if (buf_size == 0 || !buf)
4808 return ICE_ERR_PARAM;
4809
4810 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_debug_dump_internals);
4811
4812 cmd->cluster_id = CPU_TO_LE16(cluster_id);
4813 cmd->table_id = CPU_TO_LE16(table_id);
4814 cmd->idx = CPU_TO_LE32(start);
4815
4816 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4817
4818 if (!status) {
4819 if (ret_buf_size)
4820 *ret_buf_size = LE16_TO_CPU(desc.datalen);
4821 if (ret_next_cluster)
4822 *ret_next_cluster = LE16_TO_CPU(cmd->cluster_id);
4823 if (ret_next_table)
4824 *ret_next_table = LE16_TO_CPU(cmd->table_id);
4825 if (ret_next_index)
4826 *ret_next_index = LE32_TO_CPU(cmd->idx);
4827 }
4828
4829 return status;
4830 }
4831
4832 /**
4833 * ice_read_byte - read context byte into struct
4834 * @src_ctx: the context structure to read from
4835 * @dest_ctx: the context to be written to
4836 * @ce_info: a description of the struct to be filled
4837 */
4838 static void
4839 ice_read_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4840 {
4841 u8 dest_byte, mask;
4842 u8 *src, *target;
4843 u16 shift_width;
4844
4845 /* prepare the bits and mask */
4846 shift_width = ce_info->lsb % 8;
4847 mask = (u8)(BIT(ce_info->width) - 1);
4848
4849 /* shift to correct alignment */
4850 mask <<= shift_width;
4851
4852 /* get the current bits from the src bit string */
4853 src = src_ctx + (ce_info->lsb / 8);
4854
4855 ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
4856
4857 dest_byte &= mask;
4858
4859 dest_byte >>= shift_width;
4860
4861 /* get the address from the struct field */
4862 target = dest_ctx + ce_info->offset;
4863
4864 /* put it back in the struct */
4865 ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
4866 }
4867
4868 /**
4869 * ice_read_word - read context word into struct
4870 * @src_ctx: the context structure to read from
4871 * @dest_ctx: the context to be written to
4872 * @ce_info: a description of the struct to be filled
4873 */
4874 static void
4875 ice_read_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4876 {
4877 u16 dest_word, mask;
4878 u8 *src, *target;
4879 __le16 src_word;
4880 u16 shift_width;
4881
4882 /* prepare the bits and mask */
4883 shift_width = ce_info->lsb % 8;
4884 mask = BIT(ce_info->width) - 1;
4885
4886 /* shift to correct alignment */
4887 mask <<= shift_width;
4888
4889 /* get the current bits from the src bit string */
4890 src = src_ctx + (ce_info->lsb / 8);
4891
4892 ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);
4893
4894 /* the data in the memory is stored as little endian so mask it
4895 * correctly
4896 */
4897 src_word &= CPU_TO_LE16(mask);
4898
4899 /* get the data back into host order before shifting */
4900 dest_word = LE16_TO_CPU(src_word);
4901
4902 dest_word >>= shift_width;
4903
4904 /* get the address from the struct field */
4905 target = dest_ctx + ce_info->offset;
4906
4907 /* put it back in the struct */
4908 ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
4909 }
4910
4911 /**
4912 * ice_read_dword - read context dword into struct
4913 * @src_ctx: the context structure to read from
4914 * @dest_ctx: the context to be written to
4915 * @ce_info: a description of the struct to be filled
4916 */
4917 static void
4918 ice_read_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4919 {
4920 u32 dest_dword, mask;
4921 __le32 src_dword;
4922 u8 *src, *target;
4923 u16 shift_width;
4924
4925 /* prepare the bits and mask */
4926 shift_width = ce_info->lsb % 8;
4927
4928 /* if the field width is exactly 32 on an x86 machine, then the shift
4929 * operation will not work because the SHL instruction's count is masked
4930 * to 5 bits so the shift will do nothing
4931 */
4932 if (ce_info->width < 32)
4933 mask = BIT(ce_info->width) - 1;
4934 else
4935 mask = (u32)~0;
4936
4937 /* shift to correct alignment */
4938 mask <<= shift_width;
4939
4940 /* get the current bits from the src bit string */
4941 src = src_ctx + (ce_info->lsb / 8);
4942
4943 ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);
4944
4945 /* the data in the memory is stored as little endian so mask it
4946 * correctly
4947 */
4948 src_dword &= CPU_TO_LE32(mask);
4949
4950 /* get the data back into host order before shifting */
4951 dest_dword = LE32_TO_CPU(src_dword);
4952
4953 dest_dword >>= shift_width;
4954
4955 /* get the address from the struct field */
4956 target = dest_ctx + ce_info->offset;
4957
4958 /* put it back in the struct */
4959 ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
4960 }
4961
4962 /**
4963 * ice_read_qword - read context qword into struct
4964 * @src_ctx: the context structure to read from
4965 * @dest_ctx: the context to be written to
4966 * @ce_info: a description of the struct to be filled
4967 */
4968 static void
4969 ice_read_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4970 {
4971 u64 dest_qword, mask;
4972 __le64 src_qword;
4973 u8 *src, *target;
4974 u16 shift_width;
4975
4976 /* prepare the bits and mask */
4977 shift_width = ce_info->lsb % 8;
4978
4979 /* if the field width is exactly 64 on an x86 machine, then the shift
4980 * operation will not work because the SHL instruction's shift count is
4981 * masked to 6 bits, so the shift would do nothing
4982 */
4983 if (ce_info->width < 64)
4984 mask = BIT_ULL(ce_info->width) - 1;
4985 else
4986 mask = (u64)~0;
4987
4988 /* shift to correct alignment */
4989 mask <<= shift_width;
4990
4991 /* get the current bits from the src bit string */
4992 src = src_ctx + (ce_info->lsb / 8);
4993
4994 ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);
4995
4996 /* the data in the memory is stored as little endian so mask it
4997 * correctly
4998 */
4999 src_qword &= CPU_TO_LE64(mask);
5000
5001 /* get the data back into host order before shifting */
5002 dest_qword = LE64_TO_CPU(src_qword);
5003
5004 dest_qword >>= shift_width;
5005
5006 /* get the address from the struct field */
5007 target = dest_ctx + ce_info->offset;
5008
5009 /* put it back in the struct */
5010 ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
5011 }
5012
5013 /**
5014 * ice_get_ctx - extract context bits from a packed structure
5015 * @src_ctx: pointer to a generic packed context structure
5016 * @dest_ctx: pointer to a generic non-packed context structure
5017 * @ce_info: a description of the structure to be read from
5018 */
5019 enum ice_status
5020 ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
5021 {
5022 int f;
5023
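/* the ce_info table is expected to be terminated by an entry whose
 * width is zero, which ends this loop
 */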
5024 for (f = 0; ce_info[f].width; f++) {
5025 switch (ce_info[f].size_of) {
5026 case 1:
5027 ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
5028 break;
5029 case 2:
5030 ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
5031 break;
5032 case 4:
5033 ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
5034 break;
5035 case 8:
5036 ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
5037 break;
5038 default:
5039 /* nothing to do, just keep going */
5040 break;
5041 }
5042 }
5043
5044 return ICE_SUCCESS;
5045 }
5046
5047 /**
5048 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
5049 * @hw: pointer to the HW struct
5050 * @vsi_handle: software VSI handle
5051 * @tc: TC number
5052 * @q_handle: software queue handle
5053 */
5054 struct ice_q_ctx *
5055 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
5056 {
5057 struct ice_vsi_ctx *vsi;
5058 struct ice_q_ctx *q_ctx;
5059
5060 vsi = ice_get_vsi_ctx(hw, vsi_handle);
5061 if (!vsi)
5062 return NULL;
5063 if (q_handle >= vsi->num_lan_q_entries[tc])
5064 return NULL;
5065 if (!vsi->lan_q_ctx[tc])
5066 return NULL;
5067 q_ctx = vsi->lan_q_ctx[tc];
5068 return &q_ctx[q_handle];
5069 }
5070
5071 /**
5072 * ice_ena_vsi_txq
5073 * @pi: port information structure
5074 * @vsi_handle: software VSI handle
5075 * @tc: TC number
5076 * @q_handle: software queue handle
5077 * @num_qgrps: Number of added queue groups
5078 * @buf: list of queue groups to be added
5079 * @buf_size: size of buffer for indirect command
5080 * @cd: pointer to command details structure or NULL
5081 *
5082 * This function adds one LAN queue
5083 */
5084 enum ice_status
5085 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
5086 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
5087 struct ice_sq_cd *cd)
5088 {
5089 struct ice_aqc_txsched_elem_data node = { 0 };
5090 struct ice_sched_node *parent;
5091 struct ice_q_ctx *q_ctx;
5092 enum ice_status status;
5093 struct ice_hw *hw;
5094
5095 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5096 return ICE_ERR_CFG;
5097
5098 if (num_qgrps > 1 || buf->num_txqs > 1)
5099 return ICE_ERR_MAX_LIMIT;
5100
5101 hw = pi->hw;
5102
5103 if (!ice_is_vsi_valid(hw, vsi_handle))
5104 return ICE_ERR_PARAM;
5105
5106 ice_acquire_lock(&pi->sched_lock);
5107
5108 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
5109 if (!q_ctx) {
5110 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
5111 q_handle);
5112 status = ICE_ERR_PARAM;
5113 goto ena_txq_exit;
5114 }
5115
5116 /* find a parent node */
5117 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
5118 ICE_SCHED_NODE_OWNER_LAN);
5119 if (!parent) {
5120 status = ICE_ERR_PARAM;
5121 goto ena_txq_exit;
5122 }
5123
5124 buf->parent_teid = parent->info.node_teid;
5125 node.parent_teid = parent->info.node_teid;
5126 /* Mark the values in the "generic" section as valid. The default
5127 * value in the "generic" section is zero. This means that:
5128 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
5129 * - Priority among siblings is 0, indicated by Bits 1-3.
5130 * - WFQ, indicated by Bit 4.
5131 * - An adjustment value of 0 is used in the PSM credit update flow,
5132 * indicated by Bits 5-6.
5133 * - Bit 7 is reserved.
5134 * Without setting the generic section as valid in valid_sections, the
5135 * Admin Queue command will fail with error code ICE_AQ_RC_EINVAL.
5136 */
5137 buf->txqs[0].info.valid_sections =
5138 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
5139 ICE_AQC_ELEM_VALID_EIR;
5140 buf->txqs[0].info.generic = 0;
5141 buf->txqs[0].info.cir_bw.bw_profile_idx =
5142 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5143 buf->txqs[0].info.cir_bw.bw_alloc =
5144 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5145 buf->txqs[0].info.eir_bw.bw_profile_idx =
5146 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5147 buf->txqs[0].info.eir_bw.bw_alloc =
5148 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5149
5150 /* add the LAN queue */
5151 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
5152 if (status != ICE_SUCCESS) {
5153 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
5154 LE16_TO_CPU(buf->txqs[0].txq_id),
5155 hw->adminq.sq_last_status);
5156 goto ena_txq_exit;
5157 }
5158
5159 node.node_teid = buf->txqs[0].q_teid;
5160 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
5161 q_ctx->q_handle = q_handle;
5162 q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
5163
5164 /* add a leaf node into scheduler tree queue layer */
5165 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL);
5166 if (!status)
5167 status = ice_sched_replay_q_bw(pi, q_ctx);
5168
5169 ena_txq_exit:
5170 ice_release_lock(&pi->sched_lock);
5171 return status;
5172 }
5173
5174 /**
5175 * ice_dis_vsi_txq
5176 * @pi: port information structure
5177 * @vsi_handle: software VSI handle
5178 * @tc: TC number
5179 * @num_queues: number of queues
5180 * @q_handles: pointer to software queue handle array
5181 * @q_ids: pointer to the q_id array
5182 * @q_teids: pointer to queue node teids
5183 * @rst_src: if called due to reset, specifies the reset source
5184 * @vmvf_num: the relative VM or VF number that is undergoing the reset
5185 * @cd: pointer to command details structure or NULL
5186 *
5187 * This function removes queues and their corresponding nodes in SW DB
5188 */
5189 enum ice_status
5190 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
5191 u16 *q_handles, u16 *q_ids, u32 *q_teids,
5192 enum ice_disq_rst_src rst_src, u16 vmvf_num,
5193 struct ice_sq_cd *cd)
5194 {
5195 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
5196 struct ice_aqc_dis_txq_item *qg_list;
5197 struct ice_q_ctx *q_ctx;
5198 struct ice_hw *hw;
5199 u16 i, buf_size;
5200
5201 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5202 return ICE_ERR_CFG;
5203
5204 hw = pi->hw;
5205
5206 if (!num_queues) {
5207 /* if the queue is already disabled but the disable queue command
5208 * still has to be sent to complete the VF reset, then call
5209 * ice_aq_dis_lan_txq without any queue information
5210 */
5211 if (rst_src)
5212 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
5213 vmvf_num, NULL);
5214 return ICE_ERR_CFG;
5215 }
5216
5217 buf_size = ice_struct_size(qg_list, q_id, 1);
5218 qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, buf_size);
5219 if (!qg_list)
5220 return ICE_ERR_NO_MEMORY;
5221
5222 ice_acquire_lock(&pi->sched_lock);
5223
5224 for (i = 0; i < num_queues; i++) {
5225 struct ice_sched_node *node;
5226
5227 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
5228 if (!node)
5229 continue;
5230 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
5231 if (!q_ctx) {
5232 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
5233 q_handles[i]);
5234 continue;
5235 }
5236 if (q_ctx->q_handle != q_handles[i]) {
5237 ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
5238 q_ctx->q_handle, q_handles[i]);
5239 continue;
5240 }
5241 qg_list->parent_teid = node->info.parent_teid;
5242 qg_list->num_qs = 1;
5243 qg_list->q_id[0] = CPU_TO_LE16(q_ids[i]);
5244 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
5245 vmvf_num, cd);
5246
5247 if (status != ICE_SUCCESS)
5248 break;
5249 ice_free_sched_node(pi, node);
5250 q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
5251 }
5252 ice_release_lock(&pi->sched_lock);
5253 ice_free(hw, qg_list);
5254 return status;
5255 }
5256
5257 /**
5258 * ice_cfg_vsi_qs - configure the new/existing VSI queues
5259 * @pi: port information structure
5260 * @vsi_handle: software VSI handle
5261 * @tc_bitmap: TC bitmap
5262 * @maxqs: max queues array per TC
5263 * @owner: LAN or RDMA
5264 *
5265 * This function adds/updates the VSI queues per TC.
5266 */
5267 static enum ice_status
5268 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
5269 u16 *maxqs, u8 owner)
5270 {
5271 enum ice_status status = ICE_SUCCESS;
5272 u8 i;
5273
5274 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5275 return ICE_ERR_CFG;
5276
5277 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
5278 return ICE_ERR_PARAM;
5279
5280 ice_acquire_lock(&pi->sched_lock);
5281
5282 ice_for_each_traffic_class(i) {
5283 /* configuration is possible only if TC node is present */
5284 if (!ice_sched_get_tc_node(pi, i))
5285 continue;
5286
5287 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
5288 ice_is_tc_ena(tc_bitmap, i));
5289 if (status)
5290 break;
5291 }
5292
5293 ice_release_lock(&pi->sched_lock);
5294 return status;
5295 }
5296
5297 /**
5298 * ice_cfg_vsi_lan - configure VSI LAN queues
5299 * @pi: port information structure
5300 * @vsi_handle: software VSI handle
5301 * @tc_bitmap: TC bitmap
5302 * @max_lanqs: max LAN queues array per TC
5303 *
5304 * This function adds/updates the VSI LAN queues per TC.
5305 */
5306 enum ice_status
5307 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
5308 u16 *max_lanqs)
5309 {
5310 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
5311 ICE_SCHED_NODE_OWNER_LAN);
5312 }
5313
5314 /**
5315 * ice_cfg_vsi_rdma - configure the VSI RDMA queues
5316 * @pi: port information structure
5317 * @vsi_handle: software VSI handle
5318 * @tc_bitmap: TC bitmap
5319 * @max_rdmaqs: max RDMA queues array per TC
5320 *
5321 * This function adds/updates the VSI RDMA queues per TC.
5322 */
5323 enum ice_status
5324 ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
5325 u16 *max_rdmaqs)
5326 {
5327 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
5328 ICE_SCHED_NODE_OWNER_RDMA);
5329 }
5330
5331 /**
5332 * ice_ena_vsi_rdma_qset
5333 * @pi: port information structure
5334 * @vsi_handle: software VSI handle
5335 * @tc: TC number
5336 * @rdma_qset: pointer to RDMA qset
5337 * @num_qsets: number of RDMA qsets
5338 * @qset_teid: pointer to qset node teids
5339 *
5340 * This function adds RDMA qset
5341 */
5342 enum ice_status
5343 ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
5344 u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
5345 {
5346 struct ice_aqc_txsched_elem_data node = { 0 };
5347 struct ice_aqc_add_rdma_qset_data *buf;
5348 struct ice_sched_node *parent;
5349 enum ice_status status;
5350 struct ice_hw *hw;
5351 u16 i, buf_size;
5352
5353 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5354 return ICE_ERR_CFG;
5355 hw = pi->hw;
5356
5357 if (!ice_is_vsi_valid(hw, vsi_handle))
5358 return ICE_ERR_PARAM;
5359
5360 buf_size = ice_struct_size(buf, rdma_qsets, num_qsets);
5361 buf = (struct ice_aqc_add_rdma_qset_data *)ice_malloc(hw, buf_size);
5362 if (!buf)
5363 return ICE_ERR_NO_MEMORY;
5364 ice_acquire_lock(&pi->sched_lock);
5365
5366 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
5367 ICE_SCHED_NODE_OWNER_RDMA);
5368 if (!parent) {
5369 status = ICE_ERR_PARAM;
5370 goto rdma_error_exit;
5371 }
5372 buf->parent_teid = parent->info.node_teid;
5373 node.parent_teid = parent->info.node_teid;
5374
5375 buf->num_qsets = CPU_TO_LE16(num_qsets);
5376 for (i = 0; i < num_qsets; i++) {
5377 buf->rdma_qsets[i].tx_qset_id = CPU_TO_LE16(rdma_qset[i]);
5378 buf->rdma_qsets[i].info.valid_sections =
5379 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
5380 ICE_AQC_ELEM_VALID_EIR;
5381 buf->rdma_qsets[i].info.generic = 0;
5382 buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
5383 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5384 buf->rdma_qsets[i].info.cir_bw.bw_alloc =
5385 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5386 buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
5387 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5388 buf->rdma_qsets[i].info.eir_bw.bw_alloc =
5389 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5390 }
5391 status = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
5392 if (status != ICE_SUCCESS) {
5393 ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
5394 goto rdma_error_exit;
5395 }
5396 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
5397 for (i = 0; i < num_qsets; i++) {
5398 node.node_teid = buf->rdma_qsets[i].qset_teid;
5399 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
5400 &node, NULL);
5401 if (status)
5402 break;
5403 qset_teid[i] = LE32_TO_CPU(node.node_teid);
5404 }
5405 rdma_error_exit:
5406 ice_release_lock(&pi->sched_lock);
5407 ice_free(hw, buf);
5408 return status;
5409 }
5410
5411 /**
5412 * ice_dis_vsi_rdma_qset - free RDMA resources
5413 * @pi: port_info struct
5414 * @count: number of RDMA qsets to free
5415 * @qset_teid: TEID of qset node
5416 * @q_id: list of queue IDs being disabled
5417 */
5418 enum ice_status
5419 ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
5420 u16 *q_id)
5421 {
5422 struct ice_aqc_dis_txq_item *qg_list;
5423 enum ice_status status = ICE_SUCCESS;
5424 struct ice_hw *hw;
5425 u16 qg_size;
5426 int i;
5427
5428 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5429 return ICE_ERR_CFG;
5430
5431 hw = pi->hw;
5432
5433 qg_size = ice_struct_size(qg_list, q_id, 1);
5434 qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, qg_size);
5435 if (!qg_list)
5436 return ICE_ERR_NO_MEMORY;
5437
5438 ice_acquire_lock(&pi->sched_lock);
5439
5440 for (i = 0; i < count; i++) {
5441 struct ice_sched_node *node;
5442
5443 node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
5444 if (!node)
5445 continue;
5446
5447 qg_list->parent_teid = node->info.parent_teid;
5448 qg_list->num_qs = 1;
5449 qg_list->q_id[0] =
5450 CPU_TO_LE16(q_id[i] |
5451 ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);
5452
5453 status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
5454 ICE_NO_RESET, 0, NULL);
5455 if (status)
5456 break;
5457
5458 ice_free_sched_node(pi, node);
5459 }
5460
5461 ice_release_lock(&pi->sched_lock);
5462 ice_free(hw, qg_list);
5463 return status;
5464 }
5465
5466 /**
5467 * ice_aq_get_sensor_reading
5468 * @hw: pointer to the HW struct
5469 * @sensor: sensor type
5470 * @format: requested response format
5471 * @data: pointer to data to be read from the sensor
5472 * @cd: pointer to command details structure or NULL
5473 *
5474 * Get sensor reading (0x0632)
5475 */
5476 enum ice_status
5477 ice_aq_get_sensor_reading(struct ice_hw *hw, u8 sensor, u8 format,
5478 struct ice_aqc_get_sensor_reading_resp *data,
5479 struct ice_sq_cd *cd)
5480 {
5481 struct ice_aqc_get_sensor_reading *cmd;
5482 struct ice_aq_desc desc;
5483 enum ice_status status;
5484
5485 if (!data)
5486 return ICE_ERR_PARAM;
5487
5488 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading);
5489 cmd = &desc.params.get_sensor_reading;
5490 cmd->sensor = sensor;
5491 cmd->format = format;
5492
5493 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5494
5495 if (!status)
5496 ice_memcpy(data, &desc.params.get_sensor_reading_resp,
5497 sizeof(*data), ICE_NONDMA_TO_NONDMA);
5498
5499 return status;
5500 }
5501
5502 /**
5503 * ice_is_main_vsi - checks whether the VSI is main VSI
5504 * @hw: pointer to the HW struct
5505 * @vsi_handle: VSI handle
5506 *
5507 * Checks whether the VSI is the main VSI (the first PF VSI created on
5508 * given PF).
5509 */
5510 static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
5511 {
5512 return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle];
5513 }
5514
5515 /**
5516 * ice_replay_pre_init - replay pre initialization
5517 * @hw: pointer to the HW struct
5518 * @sw: pointer to switch info struct for which function initializes filters
5519 *
5520 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
5521 */
5522 enum ice_status
5523 ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
5524 {
5525 enum ice_status status;
5526 u8 i;
5527
5528 /* Delete old entries from replay filter list head if there is any */
5529 ice_rm_sw_replay_rule_info(hw, sw);
5530 /* At the start of replay, move entries into the replay_rules list;
5531 * this allows rule entries to be added back to the filt_rules list,
5532 * which is the operational list.
5533 */
5534 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
5535 LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
5536 &sw->recp_list[i].filt_replay_rules);
5537 ice_sched_replay_agg_vsi_preinit(hw);
5538
5539 status = ice_sched_replay_root_node_bw(hw->port_info);
5540 if (status)
5541 return status;
5542
5543 return ice_sched_replay_tc_node_bw(hw->port_info);
5544 }
5545
5546 /**
5547 * ice_replay_vsi - replay VSI configuration
5548 * @hw: pointer to the HW struct
5549 * @vsi_handle: driver VSI handle
5550 *
5551 * Restore all VSI configuration after reset. It is required to call this
5552 * function with main VSI first.
5553 */
5554 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
5555 {
5556 struct ice_switch_info *sw = hw->switch_info;
5557 struct ice_port_info *pi = hw->port_info;
5558 enum ice_status status;
5559
5560 if (!ice_is_vsi_valid(hw, vsi_handle))
5561 return ICE_ERR_PARAM;
5562
5563 /* Replay pre-initialization if there is any */
5564 if (ice_is_main_vsi(hw, vsi_handle)) {
5565 status = ice_replay_pre_init(hw, sw);
5566 if (status)
5567 return status;
5568 }
5569 /* Replay per VSI all RSS configurations */
5570 status = ice_replay_rss_cfg(hw, vsi_handle);
5571 if (status)
5572 return status;
5573 /* Replay per VSI all filters */
5574 status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle);
5575 if (!status)
5576 status = ice_replay_vsi_agg(hw, vsi_handle);
5577 return status;
5578 }
5579
5580 /**
5581 * ice_replay_post - post replay configuration cleanup
5582 * @hw: pointer to the HW struct
5583 *
5584 * Post replay cleanup.
5585 */
5586 void ice_replay_post(struct ice_hw *hw)
5587 {
5588 /* Delete old entries from replay filter list head */
5589 ice_rm_all_sw_replay_rule_info(hw);
5590 ice_sched_replay_agg(hw);
5591 }
5592
5593 /**
5594 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
5595 * @hw: ptr to the hardware info
5596 * @reg: offset of 64 bit HW register to read from
5597 * @prev_stat_loaded: bool to specify if previous stats are loaded
5598 * @prev_stat: ptr to previous loaded stat value
5599 * @cur_stat: ptr to current stat value
5600 */
5601 void
5602 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
5603 u64 *prev_stat, u64 *cur_stat)
5604 {
5605 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
5606
5607 /* device stats are not reset at PFR, so they likely will not be zeroed
5608 * when the driver starts. Thus, save the value from the first read
5609 * without adding to the statistic value so that we report stats which
5610 * count up from zero.
5611 */
5612 if (!prev_stat_loaded) {
5613 *prev_stat = new_data;
5614 return;
5615 }
5616
5617 /* Calculate the difference between the new and old values, and then
5618 * add it to the software stat value.
5619 */
5620 if (new_data >= *prev_stat)
5621 *cur_stat += new_data - *prev_stat;
5622 else
5623 /* to manage the potential roll-over */
5624 *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
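/* e.g., with *prev_stat = 0xFFFFFFFFF0 and new_data = 0x10, the wrapped
 * difference added above is (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20
 */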
5625
5626 /* Update the previously stored value to prepare for next read */
5627 *prev_stat = new_data;
5628 }
5629
5630 /**
5631 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
5632 * @hw: ptr to the hardware info
5633 * @reg: offset of HW register to read from
5634 * @prev_stat_loaded: bool to specify if previous stats are loaded
5635 * @prev_stat: ptr to previous loaded stat value
5636 * @cur_stat: ptr to current stat value
5637 */
5638 void
5639 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
5640 u64 *prev_stat, u64 *cur_stat)
5641 {
5642 u32 new_data;
5643
5644 new_data = rd32(hw, reg);
5645
5646 /* device stats are not reset at PFR, so they likely will not be zeroed
5647 * when the driver starts. Thus, save the value from the first read
5648 * without adding to the statistic value so that we report stats which
5649 * count up from zero.
5650 */
5651 if (!prev_stat_loaded) {
5652 *prev_stat = new_data;
5653 return;
5654 }
5655
5656 /* Calculate the difference between the new and old values, and then
5657 * add it to the software stat value.
5658 */
5659 if (new_data >= *prev_stat)
5660 *cur_stat += new_data - *prev_stat;
5661 else
5662 /* to manage the potential roll-over */
5663 *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
5664
5665 /* Update the previously stored value to prepare for next read */
5666 *prev_stat = new_data;
5667 }
5668
5669 /**
5670 * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
5671 * @hw: ptr to the hardware info
5672 * @vsi_handle: VSI handle
5673 * @prev_stat_loaded: bool to specify if the previous stat values are loaded
5674 * @cur_stats: ptr to current stats structure
5675 *
5676 * The GLV_REPC statistic register actually tracks two 16bit statistics, and
5677 * thus cannot be read using the normal ice_stat_update32 function.
5678 *
5679 * Read the GLV_REPC register associated with the given VSI, and update the
5680 * rx_no_desc and rx_error values in the ice_eth_stats structure.
5681 *
5682 * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
5683 * cleared each time it's read.
5684 *
5685 * Note that the GLV_RDPC register also counts the causes that would trigger
5686 * GLV_REPC. However, it does not give the finer grained detail about why the
5687 * packets are being dropped. The GLV_REPC values can be used to distinguish
5688 * whether Rx packets are dropped due to errors or due to no available
5689 * descriptors.
5690 */
5691 void
5692 ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
5693 struct ice_eth_stats *cur_stats)
5694 {
5695 u16 vsi_num, no_desc, error_cnt;
5696 u32 repc;
5697
5698 if (!ice_is_vsi_valid(hw, vsi_handle))
5699 return;
5700
5701 vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
5702
5703 /* If we haven't loaded stats yet, just clear the current value */
5704 if (!prev_stat_loaded) {
5705 wr32(hw, GLV_REPC(vsi_num), 0);
5706 return;
5707 }
5708
5709 repc = rd32(hw, GLV_REPC(vsi_num));
5710 no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
5711 error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;
5712
5713 /* Clear the count by writing to the stats register */
5714 wr32(hw, GLV_REPC(vsi_num), 0);
5715
5716 cur_stats->rx_no_desc += no_desc;
5717 cur_stats->rx_errors += error_cnt;
5718 }
5719
5720 /**
5721 * ice_aq_alternate_write
5722 * @hw: pointer to the hardware structure
5723 * @reg_addr0: address of first dword to be written
5724 * @reg_val0: value to be written under 'reg_addr0'
5725 * @reg_addr1: address of second dword to be written
5726 * @reg_val1: value to be written under 'reg_addr1'
5727 *
5728 * Write one or two dwords to alternate structure. Fields are indicated
5729 * by 'reg_addr0' and 'reg_addr1' register numbers.
5730 */
5731 enum ice_status
5732 ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
5733 u32 reg_addr1, u32 reg_val1)
5734 {
5735 struct ice_aqc_read_write_alt_direct *cmd;
5736 struct ice_aq_desc desc;
5737 enum ice_status status;
5738
5739 cmd = &desc.params.read_write_alt_direct;
5740
5741 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_alt_direct);
5742 cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
5743 cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
5744 cmd->dword0_value = CPU_TO_LE32(reg_val0);
5745 cmd->dword1_value = CPU_TO_LE32(reg_val1);
5746
5747 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5748
5749 return status;
5750 }
5751
5752 /**
5753 * ice_aq_alternate_read
5754 * @hw: pointer to the hardware structure
5755 * @reg_addr0: address of first dword to be read
5756 * @reg_val0: pointer for data read from 'reg_addr0'
5757 * @reg_addr1: address of second dword to be read
5758 * @reg_val1: pointer for data read from 'reg_addr1'
5759 *
5760 * Read one or two dwords from alternate structure. Fields are indicated
5761 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
5762 * is not passed then only register at 'reg_addr0' is read.
5763 */
5764 enum ice_status
5765 ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
5766 u32 reg_addr1, u32 *reg_val1)
5767 {
5768 struct ice_aqc_read_write_alt_direct *cmd;
5769 struct ice_aq_desc desc;
5770 enum ice_status status;
5771
5772 cmd = &desc.params.read_write_alt_direct;
5773
5774 if (!reg_val0)
5775 return ICE_ERR_PARAM;
5776
5777 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_alt_direct);
5778 cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
5779 cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
5780
5781 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5782
5783 if (status == ICE_SUCCESS) {
5784 *reg_val0 = LE32_TO_CPU(cmd->dword0_value);
5785
5786 if (reg_val1)
5787 *reg_val1 = LE32_TO_CPU(cmd->dword1_value);
5788 }
5789
5790 return status;
5791 }
5792
5793 /**
5794 * ice_aq_alternate_write_done
5795 * @hw: pointer to the HW structure.
5796 * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
5797 * @reset_needed: indicates the SW should trigger GLOBAL reset
5798 *
5799 * Indicates to the FW that alternate structures have been changed.
5800 */
5801 enum ice_status
5802 ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode, bool *reset_needed)
5803 {
5804 struct ice_aqc_done_alt_write *cmd;
5805 struct ice_aq_desc desc;
5806 enum ice_status status;
5807
5808 cmd = &desc.params.done_alt_write;
5809
5810 if (!reset_needed)
5811 return ICE_ERR_PARAM;
5812
5813 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_done_alt_write);
5814 cmd->flags = bios_mode;
5815
5816 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5817 if (!status)
5818 *reset_needed = (LE16_TO_CPU(cmd->flags) &
5819 ICE_AQC_RESP_RESET_NEEDED) != 0;
5820
5821 return status;
5822 }
5823
5824 /**
5825 * ice_aq_alternate_clear
5826 * @hw: pointer to the HW structure.
5827 *
5828 * Clear the alternate structures of the port from which the function
5829 * is called.
5830 */
5831 enum ice_status ice_aq_alternate_clear(struct ice_hw *hw)
5832 {
5833 struct ice_aq_desc desc;
5834 enum ice_status status;
5835
5836 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_port_alt_write);
5837
5838 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5839
5840 return status;
5841 }
5842
5843 /**
5844 * ice_sched_query_elem - query element information from HW
5845 * @hw: pointer to the HW struct
5846 * @node_teid: node TEID to be queried
5847 * @buf: buffer to element information
5848 *
5849 * This function queries HW element information
5850 */
5851 enum ice_status
5852 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
5853 struct ice_aqc_txsched_elem_data *buf)
5854 {
5855 u16 buf_size, num_elem_ret = 0;
5856 enum ice_status status;
5857
5858 buf_size = sizeof(*buf);
5859 ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
5860 buf->node_teid = CPU_TO_LE32(node_teid);
5861 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
5862 NULL);
5863 if (status != ICE_SUCCESS || num_elem_ret != 1)
5864 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
5865 return status;
5866 }
5867
5868 /**
5869 * ice_get_fw_mode - returns FW mode
5870 * @hw: pointer to the HW struct
5871 */
5872 enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
5873 {
5874 #define ICE_FW_MODE_DBG_M BIT(0)
5875 #define ICE_FW_MODE_REC_M BIT(1)
5876 #define ICE_FW_MODE_ROLLBACK_M BIT(2)
5877 u32 fw_mode;
5878
5879 /* check the current FW mode */
5880 fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;
5881 if (fw_mode & ICE_FW_MODE_DBG_M)
5882 return ICE_FW_MODE_DBG;
5883 else if (fw_mode & ICE_FW_MODE_REC_M)
5884 return ICE_FW_MODE_REC;
5885 else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
5886 return ICE_FW_MODE_ROLLBACK;
5887 else
5888 return ICE_FW_MODE_NORMAL;
5889 }
5890
5891 /**
5892 * ice_get_cur_lldp_persist_status
5893 * @hw: pointer to the HW struct
5894 * @lldp_status: return value of LLDP persistent status
5895 *
5896 * Get the current status of LLDP persistent
5897 */
5898 enum ice_status
5899 ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
5900 {
5901 struct ice_port_info *pi = hw->port_info;
5902 enum ice_status ret;
5903 __le32 raw_data;
5904 u32 data, mask;
5905
5906 if (!lldp_status)
5907 return ICE_ERR_BAD_PTR;
5908
5909 ret = ice_acquire_nvm(hw, ICE_RES_READ);
5910 if (ret)
5911 return ret;
5912
5913 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LLDP_PRESERVED_MOD_ID,
5914 ICE_AQC_NVM_CUR_LLDP_PERSIST_RD_OFFSET,
5915 ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data,
5916 false, true, NULL);
5917 if (!ret) {
5918 data = LE32_TO_CPU(raw_data);
5919 mask = ICE_AQC_NVM_LLDP_STATUS_M <<
5920 (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
5921 data = data & mask;
5922 *lldp_status = data >>
5923 (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
5924 }
5925
5926 ice_release_nvm(hw);
5927
5928 return ret;
5929 }
5930
5931 /**
5932 * ice_get_dflt_lldp_persist_status
5933 * @hw: pointer to the HW struct
5934 * @lldp_status: return value of LLDP persistent status
5935 *
5936 * Get the default status of LLDP persistent
5937 */
5938 enum ice_status
5939 ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
5940 {
5941 struct ice_port_info *pi = hw->port_info;
5942 u32 data, mask, loc_data, loc_data_tmp;
5943 enum ice_status ret;
5944 __le16 loc_raw_data;
5945 __le32 raw_data;
5946
5947 if (!lldp_status)
5948 return ICE_ERR_BAD_PTR;
5949
5950 ret = ice_acquire_nvm(hw, ICE_RES_READ);
5951 if (ret)
5952 return ret;
5953
5954 /* Read the offset of EMP_SR_PTR */
5955 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
5956 ICE_AQC_NVM_EMP_SR_PTR_OFFSET,
5957 ICE_AQC_NVM_EMP_SR_PTR_RD_LEN,
5958 &loc_raw_data, false, true, NULL);
5959 if (ret)
5960 goto exit;
5961
5962 loc_data = LE16_TO_CPU(loc_raw_data);
5963 if (loc_data & ICE_AQC_NVM_EMP_SR_PTR_TYPE_M) {
5964 loc_data &= ICE_AQC_NVM_EMP_SR_PTR_M;
5965 loc_data *= ICE_AQC_NVM_SECTOR_UNIT;
5966 } else {
5967 loc_data *= ICE_AQC_NVM_WORD_UNIT;
5968 }
5969
5970 /* Read the offset of LLDP configuration pointer */
5971 loc_data += ICE_AQC_NVM_LLDP_CFG_PTR_OFFSET;
5972 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
5973 ICE_AQC_NVM_LLDP_CFG_PTR_RD_LEN, &loc_raw_data,
5974 false, true, NULL);
5975 if (ret)
5976 goto exit;
5977
5978 loc_data_tmp = LE16_TO_CPU(loc_raw_data);
5979 loc_data_tmp *= ICE_AQC_NVM_WORD_UNIT;
5980 loc_data += loc_data_tmp;
5981
5982 /* We need to skip LLDP configuration section length (2 bytes) */
5983 loc_data += ICE_AQC_NVM_LLDP_CFG_HEADER_LEN;
5984
5985 /* Read the LLDP Default Configure */
5986 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
5987 ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data, false,
5988 true, NULL);
5989 if (!ret) {
5990 data = LE32_TO_CPU(raw_data);
5991 mask = ICE_AQC_NVM_LLDP_STATUS_M <<
5992 (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
5993 data = data & mask;
5994 *lldp_status = data >>
5995 (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
5996 }
5997
5998 exit:
5999 ice_release_nvm(hw);
6000
6001 return ret;
6002 }
6003
6004 /**
6005 * ice_aq_read_i2c
6006 * @hw: pointer to the hw struct
6007 * @topo_addr: topology address for a device to communicate with
6008 * @bus_addr: 7-bit I2C bus address
6009 * @addr: I2C memory address (I2C offset) with up to 16 bits
6010 * @params: I2C parameters: bit [7] - Repeated start, bits [6:5] data offset size,
6011 * bit [4] - I2C address type, bits [3:0] - data size to read (0-16 bytes)
6012 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
6013 * @cd: pointer to command details structure or NULL
6014 *
6015 * Read I2C (0x06E2)
6016 */
6017 enum ice_status
6018 ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
6019 u16 bus_addr, __le16 addr, u8 params, u8 *data,
6020 struct ice_sq_cd *cd)
6021 {
6022 struct ice_aq_desc desc = { 0 };
6023 struct ice_aqc_i2c *cmd;
6024 enum ice_status status;
6025 u8 data_size;
6026
6027 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
6028 cmd = &desc.params.read_write_i2c;
6029
6030 if (!data)
6031 return ICE_ERR_PARAM;
6032
6033 data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;
6034
6035 cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
6036 cmd->topo_addr = topo_addr;
6037 cmd->i2c_params = params;
6038 cmd->i2c_addr = addr;
6039
6040 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6041 if (!status) {
6042 struct ice_aqc_read_i2c_resp *resp;
6043 u8 i;
6044
6045 resp = &desc.params.read_i2c_resp;
6046 for (i = 0; i < data_size; i++) {
6047 *data = resp->i2c_data[i];
6048 data++;
6049 }
6050 }
6051
6052 return status;
6053 }
6054
6055 /**
6056 * ice_aq_write_i2c
6057 * @hw: pointer to the hw struct
6058 * @topo_addr: topology address for a device to communicate with
6059 * @bus_addr: 7-bit I2C bus address
6060 * @addr: I2C memory address (I2C offset) with up to 16 bits
6061 * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-7 bytes)
6062 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
6063 * @cd: pointer to command details structure or NULL
6064 *
6065 * Write I2C (0x06E3)
6066 */
6067 enum ice_status
6068 ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
6069 u16 bus_addr, __le16 addr, u8 params, const u8 *data,
6070 struct ice_sq_cd *cd)
6071 {
6072 struct ice_aq_desc desc = { 0 };
6073 struct ice_aqc_i2c *cmd;
6074 u8 i, data_size;
6075
6076 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
6077 cmd = &desc.params.read_write_i2c;
6078
6079 data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;
6080
6081 /* data_size limited to 4 */
6082 if (data_size > 4)
6083 return ICE_ERR_PARAM;
6084
6085 cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
6086 cmd->topo_addr = topo_addr;
6087 cmd->i2c_params = params;
6088 cmd->i2c_addr = addr;
6089
6090 for (i = 0; i < data_size; i++) {
6091 cmd->i2c_data[i] = *data;
6092 data++;
6093 }
6094
6095 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6096 }
6097
6098 /**
6099 * ice_aq_set_gpio
6100 * @hw: pointer to the hw struct
6101 * @gpio_ctrl_handle: GPIO controller node handle
6102 * @pin_idx: IO Number of the GPIO that needs to be set
6103 * @value: SW provided IO value to set in the LSB
6104 * @cd: pointer to command details structure or NULL
6105 *
6106 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
6107 */
6108 enum ice_status
6109 ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
6110 struct ice_sq_cd *cd)
6111 {
6112 struct ice_aqc_gpio *cmd;
6113 struct ice_aq_desc desc;
6114
6115 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
6116 cmd = &desc.params.read_write_gpio;
6117 cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
6118 cmd->gpio_num = pin_idx;
6119 cmd->gpio_val = value ? 1 : 0;
6120
6121 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6122 }
6123
6124 /**
6125 * ice_aq_get_gpio
6126 * @hw: pointer to the hw struct
6127 * @gpio_ctrl_handle: GPIO controller node handle
6128 * @pin_idx: IO Number of the GPIO that needs to be set
6129 * @value: IO value read
6130 * @cd: pointer to command details structure or NULL
6131 *
6132 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
6133 * the topology
6134 */
6135 enum ice_status
6136 ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
6137 bool *value, struct ice_sq_cd *cd)
6138 {
6139 struct ice_aqc_gpio *cmd;
6140 struct ice_aq_desc desc;
6141 enum ice_status status;
6142
6143 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
6144 cmd = &desc.params.read_write_gpio;
6145 cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
6146 cmd->gpio_num = pin_idx;
6147
6148 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6149 if (status)
6150 return status;
6151
6152 *value = !!cmd->gpio_val;
6153 return ICE_SUCCESS;
6154 }
6155
6156 /**
6157 * ice_is_fw_api_min_ver
6158 * @hw: pointer to the hardware structure
6159 * @maj: major version
6160 * @min: minor version
6161 * @patch: patch version
6162 *
6163 * Checks if the firmware is minimum version
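 *
 * For example, an AQ API version of 1.7.5 satisfies a required minimum of
 * 1.7.0: same major, same minor, and a patch level of at least 0.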
6164 */
6165 static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
6166 {
6167 if (hw->api_maj_ver == maj) {
6168 if (hw->api_min_ver > min)
6169 return true;
6170 if (hw->api_min_ver == min && hw->api_patch >= patch)
6171 return true;
6172 } else if (hw->api_maj_ver > maj) {
6173 return true;
6174 }
6175
6176 return false;
6177 }
6178
6179 /**
6180 * ice_is_fw_min_ver
6181 * @hw: pointer to the hardware structure
6182 * @branch: branch version
6183 * @maj: major version
6184 * @min: minor version
6185 * @patch: patch version
6186 *
6187 * Checks if the firmware is minimum version
6188 */
6189 static bool ice_is_fw_min_ver(struct ice_hw *hw, u8 branch, u8 maj, u8 min,
6190 u8 patch)
6191 {
6192 if (hw->fw_branch == branch) {
6193 if (hw->fw_maj_ver > maj)
6194 return true;
6195 if (hw->fw_maj_ver == maj) {
6196 if (hw->fw_min_ver > min)
6197 return true;
6198 if (hw->fw_min_ver == min && hw->fw_patch >= patch)
6199 return true;
6200 }
6201 }
6202
6203 return false;
6204 }
6205
6206 /**
6207 * ice_fw_supports_link_override
6208 * @hw: pointer to the hardware structure
6209 *
6210 * Checks if the firmware supports link override
6211 */
6212 bool ice_fw_supports_link_override(struct ice_hw *hw)
6213 {
6214 return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
6215 ICE_FW_API_LINK_OVERRIDE_MIN,
6216 ICE_FW_API_LINK_OVERRIDE_PATCH);
6217 }
6218
6219 /**
6220 * ice_get_link_default_override
6221 * @ldo: pointer to the link default override struct
6222 * @pi: pointer to the port info struct
6223 *
6224 * Gets the link default override for a port
6225 */
6226 enum ice_status
6227 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
6228 struct ice_port_info *pi)
6229 {
6230 u16 i, tlv, tlv_len, tlv_start, buf, offset;
6231 struct ice_hw *hw = pi->hw;
6232 enum ice_status status;
6233
6234 status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
6235 ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
6236 if (status) {
6237 ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
6238 return status;
6239 }
6240
6241 /* Each port has its own config; calculate for our port */
6242 tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
6243 ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
6244
6245 /* link options first */
6246 status = ice_read_sr_word(hw, tlv_start, &buf);
6247 if (status) {
6248 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
6249 return status;
6250 }
6251 ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
6252 ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
6253 ICE_LINK_OVERRIDE_PHY_CFG_S;
6254
6255 /* link PHY config */
6256 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
6257 status = ice_read_sr_word(hw, offset, &buf);
6258 if (status) {
6259 ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
6260 return status;
6261 }
6262 ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
6263
6264 /* PHY types low */
6265 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
6266 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
6267 status = ice_read_sr_word(hw, (offset + i), &buf);
6268 if (status) {
6269 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
6270 return status;
6271 }
6272 /* shift 16 bits at a time to fill 64 bits */
6273 ldo->phy_type_low |= ((u64)buf << (i * 16));
6274 }
6275
6276 /* PHY types high */
6277 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
6278 ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
6279 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
6280 status = ice_read_sr_word(hw, (offset + i), &buf);
6281 if (status) {
6282 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
6283 return status;
6284 }
6285 /* shift 16 bits at a time to fill 64 bits */
6286 ldo->phy_type_high |= ((u64)buf << (i * 16));
6287 }
6288
6289 return status;
6290 }
6291
6292 /**
6293 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
6294 * @caps: get PHY capability data
6295 */
6296 bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
6297 {
6298 if (caps->caps & ICE_AQC_PHY_AN_MODE ||
6299 caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
6300 ICE_AQC_PHY_AN_EN_CLAUSE73 |
6301 ICE_AQC_PHY_AN_EN_CLAUSE37))
6302 return true;
6303
6304 return false;
6305 }
6306
6307 /**
6308 * ice_is_fw_health_report_supported
6309 * @hw: pointer to the hardware structure
6310 *
6311 * Return true if firmware supports health status reports,
6312 * false otherwise
6313 */
6314 bool ice_is_fw_health_report_supported(struct ice_hw *hw)
6315 {
6316 if (hw->api_maj_ver > ICE_FW_API_HEALTH_REPORT_MAJ)
6317 return true;
6318
6319 if (hw->api_maj_ver == ICE_FW_API_HEALTH_REPORT_MAJ) {
6320 if (hw->api_min_ver > ICE_FW_API_HEALTH_REPORT_MIN)
6321 return true;
6322 if (hw->api_min_ver == ICE_FW_API_HEALTH_REPORT_MIN &&
6323 hw->api_patch >= ICE_FW_API_HEALTH_REPORT_PATCH)
6324 return true;
6325 }
6326
6327 return false;
6328 }
6329
6330 /**
6331 * ice_aq_set_health_status_config - Configure FW health events
6332 * @hw: pointer to the HW struct
6333 * @event_source: type of diagnostic events to enable
6334 * @cd: pointer to command details structure or NULL
6335 *
6336 * Configure the health status event types that the firmware will send to this
6337 * PF. The supported event types are: PF-specific, all PFs, and global
6338 */
6339 enum ice_status
6340 ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source,
6341 struct ice_sq_cd *cd)
6342 {
6343 struct ice_aqc_set_health_status_config *cmd;
6344 struct ice_aq_desc desc;
6345
6346 cmd = &desc.params.set_health_status_config;
6347
6348 ice_fill_dflt_direct_cmd_desc(&desc,
6349 ice_aqc_opc_set_health_status_config);
6350
6351 cmd->event_source = event_source;
6352
6353 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6354 }
6355
6356 /**
6357 * ice_aq_get_port_options
6358 * @hw: pointer to the hw struct
6359 * @options: buffer for the resultant port options
6360 * @option_count: input - size of the buffer in port options structures,
6361 * output - number of returned port options
6362 * @lport: logical port to call the command with (optional)
6363 * @lport_valid: when false, FW uses the port owned by the PF instead of lport;
6364 * it must be true when the PF owns more than one port
6365 * @active_option_idx: index of active port option in returned buffer
6366 * @active_option_valid: active option in returned buffer is valid
6367 * @pending_option_idx: index of pending port option in returned buffer
6368 * @pending_option_valid: pending option in returned buffer is valid
6369 *
6370 * Calls Get Port Options AQC (0x06ea) and verifies result.
6371 */
6372 enum ice_status
6373 ice_aq_get_port_options(struct ice_hw *hw,
6374 struct ice_aqc_get_port_options_elem *options,
6375 u8 *option_count, u8 lport, bool lport_valid,
6376 u8 *active_option_idx, bool *active_option_valid,
6377 u8 *pending_option_idx, bool *pending_option_valid)
6378 {
6379 struct ice_aqc_get_port_options *cmd;
6380 struct ice_aq_desc desc;
6381 enum ice_status status;
6382 u8 i;
6383
6384 /* options buffer shall be able to hold max returned options */
6385 if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
6386 return ICE_ERR_PARAM;
6387
6388 cmd = &desc.params.get_port_options;
6389 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);
6390
6391 cmd->lport_num = lport;
6392 cmd->lport_num_valid = lport_valid;
6393
6394 status = ice_aq_send_cmd(hw, &desc, options,
6395 *option_count * sizeof(*options), NULL);
6396 if (status != ICE_SUCCESS)
6397 return status;
6398
6399 /* verify direct FW response & set output parameters */
6400 *option_count = cmd->port_options_count & ICE_AQC_PORT_OPT_COUNT_M;
6401 ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
6402 *active_option_valid = cmd->port_options & ICE_AQC_PORT_OPT_VALID;
6403 if (*active_option_valid) {
6404 *active_option_idx = cmd->port_options &
6405 ICE_AQC_PORT_OPT_ACTIVE_M;
6406 if (*active_option_idx > (*option_count - 1))
6407 return ICE_ERR_OUT_OF_RANGE;
6408 ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
6409 *active_option_idx);
6410 }
6411
6412 *pending_option_valid = cmd->pending_port_option_status &
6413 ICE_AQC_PENDING_PORT_OPT_VALID;
6414 if (*pending_option_valid) {
6415 *pending_option_idx = cmd->pending_port_option_status &
6416 ICE_AQC_PENDING_PORT_OPT_IDX_M;
6417 if (*pending_option_idx > (*option_count - 1))
6418 return ICE_ERR_OUT_OF_RANGE;
6419 ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n",
6420 *pending_option_idx);
6421 }
6422
6423 /* mask output options fields */
6424 for (i = 0; i < *option_count; i++) {
6425 options[i].pmd &= ICE_AQC_PORT_OPT_PMD_COUNT_M;
6426 options[i].max_lane_speed &= ICE_AQC_PORT_OPT_MAX_LANE_M;
6427 ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
6428 options[i].pmd, options[i].max_lane_speed);
6429 }
6430
6431 return ICE_SUCCESS;
6432 }
6433
6434 /**
6435 * ice_aq_set_port_option
6436 * @hw: pointer to the hw struct
6437 * @lport: logical port to call the command with
6438 * @lport_valid: when false, FW uses the port owned by the PF instead of lport;
6439 * it must be true when the PF owns more than one port
6440 * @new_option: new port option to be written
6441 *
6442 * Calls Set Port Options AQC (0x06eb).
6443 */
6444 enum ice_status
6445 ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
6446 u8 new_option)
6447 {
6448 struct ice_aqc_set_port_option *cmd;
6449 struct ice_aq_desc desc;
6450
6451 if (new_option >= ICE_AQC_PORT_OPT_COUNT_M)
6452 return ICE_ERR_PARAM;
6453
6454 cmd = &desc.params.set_port_option;
6455 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);
6456
6457 cmd->lport_num = lport;
6458
6459 cmd->lport_num_valid = lport_valid;
6460 cmd->selected_port_option = new_option;
6461
6462 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6463 }
6464
6465 /**
6466 * ice_aq_set_lldp_mib - Set the LLDP MIB
6467 * @hw: pointer to the HW struct
6468 * @mib_type: Local, Remote or both Local and Remote MIBs
6469 * @buf: pointer to the caller-supplied buffer to store the MIB block
6470 * @buf_size: size of the buffer (in bytes)
6471 * @cd: pointer to command details structure or NULL
6472 *
6473 * Set the LLDP MIB. (0x0A08)
6474 */
6475 enum ice_status
6476 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
6477 struct ice_sq_cd *cd)
6478 {
6479 struct ice_aqc_lldp_set_local_mib *cmd;
6480 struct ice_aq_desc desc;
6481
6482 cmd = &desc.params.lldp_set_mib;
6483
6484 if (buf_size == 0 || !buf)
6485 return ICE_ERR_PARAM;
6486
6487 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
6488
6489 desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
6490 desc.datalen = CPU_TO_LE16(buf_size);
6491
6492 cmd->type = mib_type;
6493 cmd->length = CPU_TO_LE16(buf_size);
6494
6495 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
6496 }
6497
6498 /**
6499 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
6500 * @hw: pointer to HW struct
6501 */
6502 bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
6503 {
6504 if (hw->mac_type != ICE_MAC_E810 && hw->mac_type != ICE_MAC_GENERIC)
6505 return false;
6506
6507 return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
6508 ICE_FW_API_LLDP_FLTR_MIN,
6509 ICE_FW_API_LLDP_FLTR_PATCH);
6510 }
6511
6512 /**
6513 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
6514 * @hw: pointer to HW struct
6515 * @vsi_num: absolute HW index for VSI
6516 * @add: boolean for if adding or removing a filter
6517 */
6518 enum ice_status
6519 ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
6520 {
6521 struct ice_aqc_lldp_filter_ctrl *cmd;
6522 struct ice_aq_desc desc;
6523
6524 cmd = &desc.params.lldp_filter_ctrl;
6525
6526 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
6527
6528 if (add)
6529 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
6530 else
6531 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
6532
6533 cmd->vsi_num = CPU_TO_LE16(vsi_num);
6534
6535 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6536 }
6537
6538 /**
6539 * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
6540 * @hw: pointer to HW struct
6541 */
6542 enum ice_status ice_lldp_execute_pending_mib(struct ice_hw *hw)
6543 {
6544 struct ice_aq_desc desc;
6545
6546 ice_fill_dflt_direct_cmd_desc(&desc, ice_execute_pending_lldp_mib);
6547
6548 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6549 }
6550
6551 /**
6552 * ice_fw_supports_report_dflt_cfg
6553 * @hw: pointer to the hardware structure
6554 *
6555 * Checks if the firmware supports report default configuration
6556 */
6557 bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
6558 {
6559 return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
6560 ICE_FW_API_REPORT_DFLT_CFG_MIN,
6561 ICE_FW_API_REPORT_DFLT_CFG_PATCH);
6562 }
6563
6564 /* Each index into the following array matches the speed of a return
6565 * value from the list of AQ returned speeds, covering the range
6566 * ICE_AQ_LINK_SPEED_10MB .. ICE_AQ_LINK_SPEED_100GB and excluding
6567 * ICE_AQ_LINK_SPEED_UNKNOWN, which is BIT(15). The link_speed returned
6568 * by the firmware is a 16-bit value, and the array is indexed by
6569 * [fls(speed) - 1]
6570 */
6571 static const u32 ice_aq_to_link_speed[] = {
6572 ICE_LINK_SPEED_10MBPS, /* BIT(0) */
6573 ICE_LINK_SPEED_100MBPS,
6574 ICE_LINK_SPEED_1000MBPS,
6575 ICE_LINK_SPEED_2500MBPS,
6576 ICE_LINK_SPEED_5000MBPS,
6577 ICE_LINK_SPEED_10000MBPS,
6578 ICE_LINK_SPEED_20000MBPS,
6579 ICE_LINK_SPEED_25000MBPS,
6580 ICE_LINK_SPEED_40000MBPS,
6581 ICE_LINK_SPEED_50000MBPS,
6582 ICE_LINK_SPEED_100000MBPS, /* BIT(10) */
6583 };
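/* e.g., an AQ-reported speed of BIT(7) (the eighth defined speed) yields
 * fls(BIT(7)) - 1 == 7, and ice_aq_to_link_speed[7] is ICE_LINK_SPEED_25000MBPS
 */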
6584
6585 /**
6586 * ice_get_link_speed - get integer speed from table
6587 * @index: array index from fls(aq speed) - 1
6588 *
6589 * Returns: u32 value containing integer speed
6590 */
6591 u32 ice_get_link_speed(u16 index)
6592 {
6593 if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
6594 return ICE_LINK_SPEED_UNKNOWN;
6595
6596 return ice_aq_to_link_speed[index];
6597 }
6598
6599 /**
6600 * ice_fw_supports_fec_dis_auto
6601 * @hw: pointer to the hardware structure
6602 *
6603 * Checks if the firmware supports FEC disable in Auto FEC mode
6604 */
6605 bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw)
6606 {
6607 return ice_is_fw_min_ver(hw, ICE_FW_VER_BRANCH_E810,
6608 ICE_FW_FEC_DIS_AUTO_MAJ,
6609 ICE_FW_FEC_DIS_AUTO_MIN,
6610 ICE_FW_FEC_DIS_AUTO_PATCH) ||
6611 ice_is_fw_min_ver(hw, ICE_FW_VER_BRANCH_E82X,
6612 ICE_FW_FEC_DIS_AUTO_MAJ_E82X,
6613 ICE_FW_FEC_DIS_AUTO_MIN_E82X,
6614 ICE_FW_FEC_DIS_AUTO_PATCH_E82X);
6615 }
6616
6617 /**
6618 * ice_is_fw_auto_drop_supported
6619 * @hw: pointer to the hardware structure
6620 *
6621 * Checks if the firmware supports auto drop feature
6622 */
6623 bool ice_is_fw_auto_drop_supported(struct ice_hw *hw)
6624 {
6625 if (hw->api_maj_ver >= ICE_FW_API_AUTO_DROP_MAJ &&
6626 hw->api_min_ver >= ICE_FW_API_AUTO_DROP_MIN)
6627 return true;
6628 return false;
6629 }
6630
6631