1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3
4 #include "ice_common.h"
5 #include "ice_sched.h"
6 #include "ice_adminq_cmd.h"
7 #include "ice_flow.h"
8
9 #define ICE_PF_RESET_WAIT_COUNT 300
10
11 /**
12 * ice_set_mac_type - Sets MAC type
13 * @hw: pointer to the HW structure
14 *
15 * This function sets the MAC type of the adapter based on the
16 * vendor ID and device ID stored in the HW structure.
17 */
18 static enum ice_status ice_set_mac_type(struct ice_hw *hw)
19 {
20 if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
21 return ICE_ERR_DEVICE_NOT_SUPPORTED;
22
23 switch (hw->device_id) {
24 case ICE_DEV_ID_E810C_BACKPLANE:
25 case ICE_DEV_ID_E810C_QSFP:
26 case ICE_DEV_ID_E810C_SFP:
27 case ICE_DEV_ID_E810_XXV_SFP:
28 hw->mac_type = ICE_MAC_E810;
29 break;
30 case ICE_DEV_ID_E823C_10G_BASE_T:
31 case ICE_DEV_ID_E823C_BACKPLANE:
32 case ICE_DEV_ID_E823C_QSFP:
33 case ICE_DEV_ID_E823C_SFP:
34 case ICE_DEV_ID_E823C_SGMII:
35 case ICE_DEV_ID_E822C_10G_BASE_T:
36 case ICE_DEV_ID_E822C_BACKPLANE:
37 case ICE_DEV_ID_E822C_QSFP:
38 case ICE_DEV_ID_E822C_SFP:
39 case ICE_DEV_ID_E822C_SGMII:
40 case ICE_DEV_ID_E822L_10G_BASE_T:
41 case ICE_DEV_ID_E822L_BACKPLANE:
42 case ICE_DEV_ID_E822L_SFP:
43 case ICE_DEV_ID_E822L_SGMII:
44 case ICE_DEV_ID_E823L_10G_BASE_T:
45 case ICE_DEV_ID_E823L_1GBE:
46 case ICE_DEV_ID_E823L_BACKPLANE:
47 case ICE_DEV_ID_E823L_QSFP:
48 case ICE_DEV_ID_E823L_SFP:
49 hw->mac_type = ICE_MAC_GENERIC;
50 break;
51 default:
52 hw->mac_type = ICE_MAC_UNKNOWN;
53 break;
54 }
55
56 ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
57 return 0;
58 }
59
60 /**
61 * ice_clear_pf_cfg - Clear PF configuration
62 * @hw: pointer to the hardware structure
63 *
64 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
65 * configuration, flow director filters, etc.).
66 */
67 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
68 {
69 struct ice_aq_desc desc;
70
71 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
72
73 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
74 }
75
76 /**
77 * ice_aq_manage_mac_read - manage MAC address read command
78 * @hw: pointer to the HW struct
79 * @buf: a virtual buffer to hold the manage MAC read response
80 * @buf_size: Size of the virtual buffer
81 * @cd: pointer to command details structure or NULL
82 *
83 * This function is used to return the per-PF station MAC address (0x0107).
84 * NOTE: Upon successful completion of this command, MAC address information
85 * is returned in the user specified buffer, which should be interpreted as a
86 * "manage_mac_read" response.
87 * Responses such as the various MAC addresses are stored in the HW struct
88 * (port.mac). ice_discover_dev_caps is expected to be called before this
89 * function is called.
90 */
91 static enum ice_status
92 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
93 struct ice_sq_cd *cd)
94 {
95 struct ice_aqc_manage_mac_read_resp *resp;
96 struct ice_aqc_manage_mac_read *cmd;
97 struct ice_aq_desc desc;
98 enum ice_status status;
99 u16 flags;
100 u8 i;
101
102 cmd = &desc.params.mac_read;
103
104 if (buf_size < sizeof(*resp))
105 return ICE_ERR_BUF_TOO_SHORT;
106
107 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
108
109 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
110 if (status)
111 return status;
112
113 resp = buf;
114 flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
115
116 if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
117 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
118 return ICE_ERR_CFG;
119 }
120
121 /* A single port can report up to two (LAN and WoL) addresses */
122 for (i = 0; i < cmd->num_addr; i++)
123 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
124 ether_addr_copy(hw->port_info->mac.lan_addr,
125 resp[i].mac_addr);
126 ether_addr_copy(hw->port_info->mac.perm_addr,
127 resp[i].mac_addr);
128 break;
129 }
130
131 return 0;
132 }
133
134 /**
135 * ice_aq_get_phy_caps - returns PHY capabilities
136 * @pi: port information structure
137 * @qual_mods: report qualified modules
138 * @report_mode: report mode capabilities
139 * @pcaps: structure for PHY capabilities to be filled
140 * @cd: pointer to command details structure or NULL
141 *
142 * Returns the various PHY capabilities supported on the Port (0x0600)
143 */
144 enum ice_status
145 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
146 struct ice_aqc_get_phy_caps_data *pcaps,
147 struct ice_sq_cd *cd)
148 {
149 struct ice_aqc_get_phy_caps *cmd;
150 u16 pcaps_size = sizeof(*pcaps);
151 struct ice_aq_desc desc;
152 enum ice_status status;
153 struct ice_hw *hw;
154
155 cmd = &desc.params.get_phy;
156
157 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
158 return ICE_ERR_PARAM;
159 hw = pi->hw;
160
161 if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
162 !ice_fw_supports_report_dflt_cfg(hw))
163 return ICE_ERR_PARAM;
164
165 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
166
167 if (qual_mods)
168 cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
169
170 cmd->param0 |= cpu_to_le16(report_mode);
171 status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
172
173 ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
174 report_mode);
175 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
176 (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
177 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
178 (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
179 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps);
180 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
181 pcaps->low_power_ctrl_an);
182 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap);
183 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n",
184 pcaps->eeer_value);
185 ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n",
186 pcaps->link_fec_options);
187 ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n",
188 pcaps->module_compliance_enforcement);
189 ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n",
190 pcaps->extended_compliance_code);
191 ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n",
192 pcaps->module_type[0]);
193 ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n",
194 pcaps->module_type[1]);
195 ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
196 pcaps->module_type[2]);
197
198 if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
199 pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
200 pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
201 memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
202 sizeof(pi->phy.link_info.module_type));
203 }
204
205 return status;
206 }
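
/* Illustrative usage sketch (not part of the driver): a caller that wants the
 * media-based topology capabilities of a port allocates a
 * struct ice_aqc_get_phy_caps_data buffer and passes
 * ICE_AQC_REPORT_TOPO_CAP_MEDIA, mirroring what ice_init_hw() does later in
 * this file; error handling is elided:
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *	enum ice_status status;
 *
 *	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
 *	if (!pcaps)
 *		return ICE_ERR_NO_MEMORY;
 *	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
 *				     pcaps, NULL);
 *	kfree(pcaps);
 */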
207
208 /**
209 * ice_aq_get_link_topo_handle - get link topology node return status
210 * @pi: port information structure
211 * @node_type: requested node type
212 * @cd: pointer to command details structure or NULL
213 *
214 * Get link topology node return status for specified node type (0x06E0)
215 *
216 * The cage node type can be used to determine whether a cage is present. If
217 * the AQC returns an error (ENOENT), then no cage is present. If no cage is
218 * present, the connection type is backplane or BASE-T.
219 */
220 static enum ice_status
221 ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
222 struct ice_sq_cd *cd)
223 {
224 struct ice_aqc_get_link_topo *cmd;
225 struct ice_aq_desc desc;
226
227 cmd = &desc.params.get_link_topo;
228
229 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
230
231 cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
232 ICE_AQC_LINK_TOPO_NODE_CTX_S);
233
234 /* set node type */
235 cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
236
237 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
238 }
239
240 /**
241 * ice_is_media_cage_present
242 * @pi: port information structure
243 *
244 * Returns true if media cage is present, else false. If no cage, then
245 * media type is backplane or BASE-T.
246 */
247 static bool ice_is_media_cage_present(struct ice_port_info *pi)
248 {
249 /* The cage node type can be used to determine whether a cage is present. If
250 * the AQC returns an error (ENOENT), then no cage is present. If no cage is
251 * present, the connection type is backplane or BASE-T.
252 */
253 return !ice_aq_get_link_topo_handle(pi,
254 ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
255 NULL);
256 }
257
258 /**
259 * ice_get_media_type - Gets media type
260 * @pi: port information structure
261 */
262 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
263 {
264 struct ice_link_status *hw_link_info;
265
266 if (!pi)
267 return ICE_MEDIA_UNKNOWN;
268
269 hw_link_info = &pi->phy.link_info;
270 if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
271 /* If more than one media type is selected, report unknown */
272 return ICE_MEDIA_UNKNOWN;
273
274 if (hw_link_info->phy_type_low) {
275 /* 1G SGMII is a special case where some DA cable PHYs
276 * may show this as an option when it really shouldn't be,
277 * since SGMII is meant to be between a MAC and a PHY in a
278 * backplane. Try to detect this case and handle it.
279 */
280 if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
281 (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
282 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
283 hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
284 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
285 return ICE_MEDIA_DA;
286
287 switch (hw_link_info->phy_type_low) {
288 case ICE_PHY_TYPE_LOW_1000BASE_SX:
289 case ICE_PHY_TYPE_LOW_1000BASE_LX:
290 case ICE_PHY_TYPE_LOW_10GBASE_SR:
291 case ICE_PHY_TYPE_LOW_10GBASE_LR:
292 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
293 case ICE_PHY_TYPE_LOW_25GBASE_SR:
294 case ICE_PHY_TYPE_LOW_25GBASE_LR:
295 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
296 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
297 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
298 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
299 case ICE_PHY_TYPE_LOW_50GBASE_SR:
300 case ICE_PHY_TYPE_LOW_50GBASE_FR:
301 case ICE_PHY_TYPE_LOW_50GBASE_LR:
302 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
303 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
304 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
305 case ICE_PHY_TYPE_LOW_100GBASE_DR:
306 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
307 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
308 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
309 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
310 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
311 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
312 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
313 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
314 return ICE_MEDIA_FIBER;
315 case ICE_PHY_TYPE_LOW_100BASE_TX:
316 case ICE_PHY_TYPE_LOW_1000BASE_T:
317 case ICE_PHY_TYPE_LOW_2500BASE_T:
318 case ICE_PHY_TYPE_LOW_5GBASE_T:
319 case ICE_PHY_TYPE_LOW_10GBASE_T:
320 case ICE_PHY_TYPE_LOW_25GBASE_T:
321 return ICE_MEDIA_BASET;
322 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
323 case ICE_PHY_TYPE_LOW_25GBASE_CR:
324 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
325 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
326 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
327 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
328 case ICE_PHY_TYPE_LOW_50GBASE_CP:
329 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
330 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
331 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
332 return ICE_MEDIA_DA;
333 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
334 case ICE_PHY_TYPE_LOW_40G_XLAUI:
335 case ICE_PHY_TYPE_LOW_50G_LAUI2:
336 case ICE_PHY_TYPE_LOW_50G_AUI2:
337 case ICE_PHY_TYPE_LOW_50G_AUI1:
338 case ICE_PHY_TYPE_LOW_100G_AUI4:
339 case ICE_PHY_TYPE_LOW_100G_CAUI4:
340 if (ice_is_media_cage_present(pi))
341 return ICE_MEDIA_DA;
342 fallthrough;
343 case ICE_PHY_TYPE_LOW_1000BASE_KX:
344 case ICE_PHY_TYPE_LOW_2500BASE_KX:
345 case ICE_PHY_TYPE_LOW_2500BASE_X:
346 case ICE_PHY_TYPE_LOW_5GBASE_KR:
347 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
348 case ICE_PHY_TYPE_LOW_25GBASE_KR:
349 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
350 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
351 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
352 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
353 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
354 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
355 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
356 return ICE_MEDIA_BACKPLANE;
357 }
358 } else {
359 switch (hw_link_info->phy_type_high) {
360 case ICE_PHY_TYPE_HIGH_100G_AUI2:
361 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
362 if (ice_is_media_cage_present(pi))
363 return ICE_MEDIA_DA;
364 fallthrough;
365 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
366 return ICE_MEDIA_BACKPLANE;
367 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
368 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
369 return ICE_MEDIA_FIBER;
370 }
371 }
372 return ICE_MEDIA_UNKNOWN;
373 }
374
375 /**
376 * ice_aq_get_link_info
377 * @pi: port information structure
378 * @ena_lse: enable/disable LinkStatusEvent reporting
379 * @link: pointer to link status structure - optional
380 * @cd: pointer to command details structure or NULL
381 *
382 * Get Link Status (0x607). Returns the link status of the adapter.
383 */
384 enum ice_status
385 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
386 struct ice_link_status *link, struct ice_sq_cd *cd)
387 {
388 struct ice_aqc_get_link_status_data link_data = { 0 };
389 struct ice_aqc_get_link_status *resp;
390 struct ice_link_status *li_old, *li;
391 enum ice_media_type *hw_media_type;
392 struct ice_fc_info *hw_fc_info;
393 bool tx_pause, rx_pause;
394 struct ice_aq_desc desc;
395 enum ice_status status;
396 struct ice_hw *hw;
397 u16 cmd_flags;
398
399 if (!pi)
400 return ICE_ERR_PARAM;
401 hw = pi->hw;
402 li_old = &pi->phy.link_info_old;
403 hw_media_type = &pi->phy.media_type;
404 li = &pi->phy.link_info;
405 hw_fc_info = &pi->fc;
406
407 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
408 cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
409 resp = &desc.params.get_link_status;
410 resp->cmd_flags = cpu_to_le16(cmd_flags);
411 resp->lport_num = pi->lport;
412
413 status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
414
415 if (status)
416 return status;
417
418 /* save off old link status information */
419 *li_old = *li;
420
421 /* update current link status information */
422 li->link_speed = le16_to_cpu(link_data.link_speed);
423 li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
424 li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
425 *hw_media_type = ice_get_media_type(pi);
426 li->link_info = link_data.link_info;
427 li->an_info = link_data.an_info;
428 li->ext_info = link_data.ext_info;
429 li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
430 li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
431 li->topo_media_conflict = link_data.topo_media_conflict;
432 li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
433 ICE_AQ_CFG_PACING_TYPE_M);
434
435 /* update fc info */
436 tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
437 rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
438 if (tx_pause && rx_pause)
439 hw_fc_info->current_mode = ICE_FC_FULL;
440 else if (tx_pause)
441 hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
442 else if (rx_pause)
443 hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
444 else
445 hw_fc_info->current_mode = ICE_FC_NONE;
446
447 li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
448
449 ice_debug(hw, ICE_DBG_LINK, "get link info\n");
450 ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
451 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
452 (unsigned long long)li->phy_type_low);
453 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
454 (unsigned long long)li->phy_type_high);
455 ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
456 ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
457 ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
458 ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
459 ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
460 ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
461 ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
462 li->max_frame_size);
463 ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);
464
465 /* save link status information */
466 if (link)
467 *link = *li;
468
469 /* flag cleared so calling functions don't call AQ again */
470 pi->phy.get_link_info = false;
471
472 return 0;
473 }
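
/* Illustrative usage sketch (not part of the driver): callers typically
 * refresh the cached link status only when the get_link_info flag is set,
 * e.g.:
 *
 *	if (pi->phy.get_link_info) {
 *		enum ice_status status;
 *
 *		status = ice_aq_get_link_info(pi, false, NULL, NULL);
 *		if (status)
 *			return status;
 *	}
 */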
474
475 /**
476 * ice_fill_tx_timer_and_fc_thresh
477 * @hw: pointer to the HW struct
478 * @cmd: pointer to MAC cfg structure
479 *
480 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
481 * descriptor
482 */
483 static void
484 ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
485 struct ice_aqc_set_mac_cfg *cmd)
486 {
487 u16 fc_thres_val, tx_timer_val;
488 u32 val;
489
490 /* We read back the transmit timer and FC threshold value of
491 * LFC. Thus, we will use index =
492 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
493 *
494 * Also, because we are operating on the transmit timer and FC
495 * threshold of LFC, we don't turn on any bit in tx_tmr_priority.
496 */
497 #define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
498
499 /* Retrieve the transmit timer */
500 val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
501 tx_timer_val = val &
502 PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
503 cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);
504
505 /* Retrieve the FC threshold */
506 val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
507 fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
508
509 cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
510 }
511
512 /**
513 * ice_aq_set_mac_cfg
514 * @hw: pointer to the HW struct
515 * @max_frame_size: Maximum Frame Size to be supported
516 * @cd: pointer to command details structure or NULL
517 *
518 * Set MAC configuration (0x0603)
519 */
520 enum ice_status
521 ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
522 {
523 struct ice_aqc_set_mac_cfg *cmd;
524 struct ice_aq_desc desc;
525
526 cmd = &desc.params.set_mac_cfg;
527
528 if (max_frame_size == 0)
529 return ICE_ERR_PARAM;
530
531 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
532
533 cmd->max_frame_size = cpu_to_le16(max_frame_size);
534
535 ice_fill_tx_timer_and_fc_thresh(hw, cmd);
536
537 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
538 }
539
540 /**
541 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
542 * @hw: pointer to the HW struct
543 */
544 static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
545 {
546 struct ice_switch_info *sw;
547 enum ice_status status;
548
549 hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
550 sizeof(*hw->switch_info), GFP_KERNEL);
551 sw = hw->switch_info;
552
553 if (!sw)
554 return ICE_ERR_NO_MEMORY;
555
556 INIT_LIST_HEAD(&sw->vsi_list_map_head);
557
558 status = ice_init_def_sw_recp(hw);
559 if (status) {
560 devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
561 return status;
562 }
563 return 0;
564 }
565
566 /**
567 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
568 * @hw: pointer to the HW struct
569 */
570 static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
571 {
572 struct ice_switch_info *sw = hw->switch_info;
573 struct ice_vsi_list_map_info *v_pos_map;
574 struct ice_vsi_list_map_info *v_tmp_map;
575 struct ice_sw_recipe *recps;
576 u8 i;
577
578 list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
579 list_entry) {
580 list_del(&v_pos_map->list_entry);
581 devm_kfree(ice_hw_to_dev(hw), v_pos_map);
582 }
583 recps = hw->switch_info->recp_list;
584 for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
585 struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
586
587 recps[i].root_rid = i;
588 mutex_destroy(&recps[i].filt_rule_lock);
589 list_for_each_entry_safe(lst_itr, tmp_entry,
590 &recps[i].filt_rules, list_entry) {
591 list_del(&lst_itr->list_entry);
592 devm_kfree(ice_hw_to_dev(hw), lst_itr);
593 }
594 }
595 ice_rm_all_sw_replay_rule_info(hw);
596 devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
597 devm_kfree(ice_hw_to_dev(hw), sw);
598 }
599
600 /**
601 * ice_get_fw_log_cfg - get FW logging configuration
602 * @hw: pointer to the HW struct
603 */
604 static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
605 {
606 struct ice_aq_desc desc;
607 enum ice_status status;
608 __le16 *config;
609 u16 size;
610
611 size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
612 config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
613 if (!config)
614 return ICE_ERR_NO_MEMORY;
615
616 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
617
618 status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
619 if (!status) {
620 u16 i;
621
622 /* Save FW logging information into the HW structure */
623 for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
624 u16 v, m, flgs;
625
626 v = le16_to_cpu(config[i]);
627 m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
628 flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
629
630 if (m < ICE_AQC_FW_LOG_ID_MAX)
631 hw->fw_log.evnts[m].cur = flgs;
632 }
633 }
634
635 devm_kfree(ice_hw_to_dev(hw), config);
636
637 return status;
638 }
639
640 /**
641 * ice_cfg_fw_log - configure FW logging
642 * @hw: pointer to the HW struct
643 * @enable: enable certain FW logging events if true, disable all if false
644 *
645 * This function enables/disables FW logging via Rx CQ events and a UART
646 * port based on predetermined configurations. FW logging via the Rx CQ can be
647 * enabled/disabled for individual PFs. However, FW logging via the UART can
648 * only be enabled/disabled for all PFs on the same device.
649 *
650 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
651 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
652 * before initializing the device.
653 *
654 * When re/configuring FW logging, callers need to update the "cfg" elements of
655 * the hw->fw_log.evnts array with the desired logging event configurations for
656 * modules of interest. When disabling FW logging completely, the callers can
657 * just pass false in the "enable" parameter. On completion, the function will
658 * update the "cur" element of the hw->fw_log.evnts array with the resulting
659 * logging event configurations of the modules that are being re/configured. FW
660 * logging modules that are not part of a reconfiguration operation retain their
661 * previous states.
662 *
663 * Before resetting the device, it is recommended that the driver disables FW
664 * logging before shutting down the control queue. When disabling FW logging
665 * ("enable" = false), the latest configurations of FW logging events stored in
666 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
667 * a device reset.
668 *
669 * When enabling FW logging to emit log messages via the Rx CQ during the
670 * device's initialization phase, a mechanism alternative to interrupt handlers
671 * needs to be used to extract FW log messages from the Rx CQ periodically and
672 * to prevent the Rx CQ from being full and stalling other types of control
673 * messages from FW to SW. Interrupts are typically disabled during the device's
674 * initialization phase.
675 */
676 static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
677 {
678 struct ice_aqc_fw_logging *cmd;
679 enum ice_status status = 0;
680 u16 i, chgs = 0, len = 0;
681 struct ice_aq_desc desc;
682 __le16 *data = NULL;
683 u8 actv_evnts = 0;
684 void *buf = NULL;
685
686 if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
687 return 0;
688
689 /* Disable FW logging only when the control queue is still responsive */
690 if (!enable &&
691 (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
692 return 0;
693
694 /* Get current FW log settings */
695 status = ice_get_fw_log_cfg(hw);
696 if (status)
697 return status;
698
699 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
700 cmd = &desc.params.fw_logging;
701
702 /* Indicate which controls are valid */
703 if (hw->fw_log.cq_en)
704 cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;
705
706 if (hw->fw_log.uart_en)
707 cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;
708
709 if (enable) {
710 /* Fill in an array of entries with FW logging modules and
711 * logging events being reconfigured.
712 */
713 for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
714 u16 val;
715
716 /* Keep track of enabled event types */
717 actv_evnts |= hw->fw_log.evnts[i].cfg;
718
719 if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
720 continue;
721
722 if (!data) {
723 data = devm_kcalloc(ice_hw_to_dev(hw),
724 ICE_AQC_FW_LOG_ID_MAX,
725 sizeof(*data),
726 GFP_KERNEL);
727 if (!data)
728 return ICE_ERR_NO_MEMORY;
729 }
730
731 val = i << ICE_AQC_FW_LOG_ID_S;
732 val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
733 data[chgs++] = cpu_to_le16(val);
734 }
735
736 /* Only enable FW logging if at least one module is specified.
737 * If FW logging is currently enabled but all modules are not
738 * enabled to emit log messages, disable FW logging altogether.
739 */
740 if (actv_evnts) {
741 /* Leave if there is effectively no change */
742 if (!chgs)
743 goto out;
744
745 if (hw->fw_log.cq_en)
746 cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;
747
748 if (hw->fw_log.uart_en)
749 cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
750
751 buf = data;
752 len = sizeof(*data) * chgs;
753 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
754 }
755 }
756
757 status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
758 if (!status) {
759 /* Update the current configuration to reflect events enabled.
760 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
761 * logging mode is enabled for the device. They do not reflect
762 * actual modules being enabled to emit log messages. So, their
763 * values remain unchanged even when all modules are disabled.
764 */
765 u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;
766
767 hw->fw_log.actv_evnts = actv_evnts;
768 for (i = 0; i < cnt; i++) {
769 u16 v, m;
770
771 if (!enable) {
772 /* When disabling all FW logging events as part
773 * of device's de-initialization, the original
774 * configurations are retained, and can be used
775 * to reconfigure FW logging later if the device
776 * is re-initialized.
777 */
778 hw->fw_log.evnts[i].cur = 0;
779 continue;
780 }
781
782 v = le16_to_cpu(data[i]);
783 m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
784 hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
785 }
786 }
787
788 out:
789 if (data)
790 devm_kfree(ice_hw_to_dev(hw), data);
791
792 return status;
793 }
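
/* Illustrative configuration sketch (not part of the driver): per the
 * description above, a caller that wants FW log messages on the Rx CQ sets
 * the enable bit and the per-module "cfg" values in hw->fw_log before
 * ice_init_hw() runs (which calls ice_cfg_fw_log(hw, true) internally);
 * "desired_log_flags" is a hypothetical value chosen by the caller:
 *
 *	hw->fw_log.cq_en = true;
 *	for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++)
 *		hw->fw_log.evnts[i].cfg = desired_log_flags;
 */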
794
795 /**
796 * ice_output_fw_log
797 * @hw: pointer to the HW struct
798 * @desc: pointer to the AQ message descriptor
799 * @buf: pointer to the buffer accompanying the AQ message
800 *
801 * Formats a FW Log message and outputs it via the standard driver logs.
802 */
803 void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
804 {
805 ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
806 ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
807 le16_to_cpu(desc->datalen));
808 ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
809 }
810
811 /**
812 * ice_get_itr_intrl_gran
813 * @hw: pointer to the HW struct
814 *
815 * Determines the ITR/INTRL granularities based on the maximum aggregate
816 * bandwidth according to the device's configuration during power-on.
817 */
818 static void ice_get_itr_intrl_gran(struct ice_hw *hw)
819 {
820 u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
821 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
822 GL_PWR_MODE_CTL_CAR_MAX_BW_S;
823
824 switch (max_agg_bw) {
825 case ICE_MAX_AGG_BW_200G:
826 case ICE_MAX_AGG_BW_100G:
827 case ICE_MAX_AGG_BW_50G:
828 hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
829 hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
830 break;
831 case ICE_MAX_AGG_BW_25G:
832 hw->itr_gran = ICE_ITR_GRAN_MAX_25;
833 hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
834 break;
835 }
836 }
837
838 /**
839 * ice_init_hw - main hardware initialization routine
840 * @hw: pointer to the hardware structure
841 */
842 enum ice_status ice_init_hw(struct ice_hw *hw)
843 {
844 struct ice_aqc_get_phy_caps_data *pcaps;
845 enum ice_status status;
846 u16 mac_buf_len;
847 void *mac_buf;
848
849 /* Set MAC type based on DeviceID */
850 status = ice_set_mac_type(hw);
851 if (status)
852 return status;
853
854 hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
855 PF_FUNC_RID_FUNC_NUM_M) >>
856 PF_FUNC_RID_FUNC_NUM_S;
857
858 status = ice_reset(hw, ICE_RESET_PFR);
859 if (status)
860 return status;
861
862 ice_get_itr_intrl_gran(hw);
863
864 status = ice_create_all_ctrlq(hw);
865 if (status)
866 goto err_unroll_cqinit;
867
868 /* Enable FW logging. Not fatal if this fails. */
869 status = ice_cfg_fw_log(hw, true);
870 if (status)
871 ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
872
873 status = ice_clear_pf_cfg(hw);
874 if (status)
875 goto err_unroll_cqinit;
876
877 /* Set bit to enable Flow Director filters */
878 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
879 INIT_LIST_HEAD(&hw->fdir_list_head);
880
881 ice_clear_pxe_mode(hw);
882
883 status = ice_init_nvm(hw);
884 if (status)
885 goto err_unroll_cqinit;
886
887 status = ice_get_caps(hw);
888 if (status)
889 goto err_unroll_cqinit;
890
891 hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
892 sizeof(*hw->port_info), GFP_KERNEL);
893 if (!hw->port_info) {
894 status = ICE_ERR_NO_MEMORY;
895 goto err_unroll_cqinit;
896 }
897
898 /* set the back pointer to HW */
899 hw->port_info->hw = hw;
900
901 /* Initialize port_info struct with switch configuration data */
902 status = ice_get_initial_sw_cfg(hw);
903 if (status)
904 goto err_unroll_alloc;
905
906 hw->evb_veb = true;
907
908 /* Query the allocated resources for Tx scheduler */
909 status = ice_sched_query_res_alloc(hw);
910 if (status) {
911 ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
912 goto err_unroll_alloc;
913 }
914 ice_sched_get_psm_clk_freq(hw);
915
916 /* Initialize port_info struct with scheduler data */
917 status = ice_sched_init_port(hw->port_info);
918 if (status)
919 goto err_unroll_sched;
920
921 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
922 if (!pcaps) {
923 status = ICE_ERR_NO_MEMORY;
924 goto err_unroll_sched;
925 }
926
927 /* Initialize port_info struct with PHY capabilities */
928 status = ice_aq_get_phy_caps(hw->port_info, false,
929 ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
930 NULL);
931 devm_kfree(ice_hw_to_dev(hw), pcaps);
932 if (status)
933 dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
934 status);
935
936 /* Initialize port_info struct with link information */
937 status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
938 if (status)
939 goto err_unroll_sched;
940
941 /* need a valid SW entry point to build a Tx tree */
942 if (!hw->sw_entry_point_layer) {
943 ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
944 status = ICE_ERR_CFG;
945 goto err_unroll_sched;
946 }
947 INIT_LIST_HEAD(&hw->agg_list);
948 /* Initialize max burst size */
949 if (!hw->max_burst_size)
950 ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
951
952 status = ice_init_fltr_mgmt_struct(hw);
953 if (status)
954 goto err_unroll_sched;
955
956 /* Get MAC information */
957 /* A single port can report up to two (LAN and WoL) addresses */
958 mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
959 sizeof(struct ice_aqc_manage_mac_read_resp),
960 GFP_KERNEL);
961 mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
962
963 if (!mac_buf) {
964 status = ICE_ERR_NO_MEMORY;
965 goto err_unroll_fltr_mgmt_struct;
966 }
967
968 status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
969 devm_kfree(ice_hw_to_dev(hw), mac_buf);
970
971 if (status)
972 goto err_unroll_fltr_mgmt_struct;
973 /* enable jumbo frame support at MAC level */
974 status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
975 if (status)
976 goto err_unroll_fltr_mgmt_struct;
977 /* Obtain counter base index which would be used by flow director */
978 status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
979 if (status)
980 goto err_unroll_fltr_mgmt_struct;
981 status = ice_init_hw_tbls(hw);
982 if (status)
983 goto err_unroll_fltr_mgmt_struct;
984 mutex_init(&hw->tnl_lock);
985 return 0;
986
987 err_unroll_fltr_mgmt_struct:
988 ice_cleanup_fltr_mgmt_struct(hw);
989 err_unroll_sched:
990 ice_sched_cleanup_all(hw);
991 err_unroll_alloc:
992 devm_kfree(ice_hw_to_dev(hw), hw->port_info);
993 err_unroll_cqinit:
994 ice_destroy_all_ctrlq(hw);
995 return status;
996 }
997
998 /**
999 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
1000 * @hw: pointer to the hardware structure
1001 *
1002 * This should be called only during nominal operation, not as a result of
1003 * ice_init_hw() failing, since ice_init_hw() will take care of unrolling
1004 * applicable initializations if it fails for any reason.
1005 */
1006 void ice_deinit_hw(struct ice_hw *hw)
1007 {
1008 ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
1009 ice_cleanup_fltr_mgmt_struct(hw);
1010
1011 ice_sched_cleanup_all(hw);
1012 ice_sched_clear_agg(hw);
1013 ice_free_seg(hw);
1014 ice_free_hw_tbls(hw);
1015 mutex_destroy(&hw->tnl_lock);
1016
1017 if (hw->port_info) {
1018 devm_kfree(ice_hw_to_dev(hw), hw->port_info);
1019 hw->port_info = NULL;
1020 }
1021
1022 /* Attempt to disable FW logging before shutting down control queues */
1023 ice_cfg_fw_log(hw, false);
1024 ice_destroy_all_ctrlq(hw);
1025
1026 /* Clear VSI contexts if not already cleared */
1027 ice_clear_all_vsi_ctx(hw);
1028 }
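
/* Illustrative usage sketch (not part of the driver): ice_init_hw() and
 * ice_deinit_hw() are used as a pair around nominal operation, e.g. in a
 * simplified probe/remove flow:
 *
 *	status = ice_init_hw(hw);
 *	if (status)
 *		return status;	(ice_init_hw() has already unrolled itself)
 *
 *	...nominal operation...
 *
 *	ice_deinit_hw(hw);	(teardown path only)
 */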
1029
1030 /**
1031 * ice_check_reset - Check to see if a global reset is complete
1032 * @hw: pointer to the hardware structure
1033 */
1034 enum ice_status ice_check_reset(struct ice_hw *hw)
1035 {
1036 u32 cnt, reg = 0, grst_timeout, uld_mask;
1037
1038 /* Poll for Device Active state in case a recent CORER, GLOBR,
1039 * or EMPR has occurred. The grst delay value is in 100ms units.
1040 * Add 1sec for outstanding AQ commands that can take a long time.
1041 */
1042 grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
1043 GLGEN_RSTCTL_GRSTDEL_S) + 10;
1044
1045 for (cnt = 0; cnt < grst_timeout; cnt++) {
1046 mdelay(100);
1047 reg = rd32(hw, GLGEN_RSTAT);
1048 if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
1049 break;
1050 }
1051
1052 if (cnt == grst_timeout) {
1053 ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
1054 return ICE_ERR_RESET_FAILED;
1055 }
1056
1057 #define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
1058 GLNVM_ULD_PCIER_DONE_1_M |\
1059 GLNVM_ULD_CORER_DONE_M |\
1060 GLNVM_ULD_GLOBR_DONE_M |\
1061 GLNVM_ULD_POR_DONE_M |\
1062 GLNVM_ULD_POR_DONE_1_M |\
1063 GLNVM_ULD_PCIER_DONE_2_M)
1064
1065 uld_mask = ICE_RESET_DONE_MASK;
1066
1067 /* Device is Active; check Global Reset processes are done */
1068 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
1069 reg = rd32(hw, GLNVM_ULD) & uld_mask;
1070 if (reg == uld_mask) {
1071 ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
1072 break;
1073 }
1074 mdelay(10);
1075 }
1076
1077 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1078 ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
1079 reg);
1080 return ICE_ERR_RESET_FAILED;
1081 }
1082
1083 return 0;
1084 }
1085
1086 /**
1087 * ice_pf_reset - Reset the PF
1088 * @hw: pointer to the hardware structure
1089 *
1090 * If a global reset has been triggered, this function checks
1091 * for its completion and then issues the PF reset
1092 */
1093 static enum ice_status ice_pf_reset(struct ice_hw *hw)
1094 {
1095 u32 cnt, reg;
1096
1097 /* If at function entry a global reset was already in progress, i.e.
1098 * state is not 'device active' or any of the reset done bits are not
1099 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
1100 * global reset is done.
1101 */
1102 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
1103 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
1104 /* poll on global reset currently in progress until done */
1105 if (ice_check_reset(hw))
1106 return ICE_ERR_RESET_FAILED;
1107
1108 return 0;
1109 }
1110
1111 /* Reset the PF */
1112 reg = rd32(hw, PFGEN_CTRL);
1113
1114 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
1115
1116 /* Wait for the PFR to complete. The wait time is the global config lock
1117 * timeout plus the PFR timeout which will account for a possible reset
1118 * that is occurring during a download package operation.
1119 */
1120 for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
1121 ICE_PF_RESET_WAIT_COUNT; cnt++) {
1122 reg = rd32(hw, PFGEN_CTRL);
1123 if (!(reg & PFGEN_CTRL_PFSWR_M))
1124 break;
1125
1126 mdelay(1);
1127 }
1128
1129 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1130 ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
1131 return ICE_ERR_RESET_FAILED;
1132 }
1133
1134 return 0;
1135 }
1136
1137 /**
1138 * ice_reset - Perform different types of reset
1139 * @hw: pointer to the hardware structure
1140 * @req: reset request
1141 *
1142 * This function triggers a reset as specified by the req parameter.
1143 *
1144 * Note:
1145 * If anything other than a PF reset is triggered, PXE mode is restored.
1146 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
1147 * interface has been restored in the rebuild flow.
1148 */
1149 enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1150 {
1151 u32 val = 0;
1152
1153 switch (req) {
1154 case ICE_RESET_PFR:
1155 return ice_pf_reset(hw);
1156 case ICE_RESET_CORER:
1157 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1158 val = GLGEN_RTRIG_CORER_M;
1159 break;
1160 case ICE_RESET_GLOBR:
1161 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1162 val = GLGEN_RTRIG_GLOBR_M;
1163 break;
1164 default:
1165 return ICE_ERR_PARAM;
1166 }
1167
1168 val |= rd32(hw, GLGEN_RTRIG);
1169 wr32(hw, GLGEN_RTRIG, val);
1170 ice_flush(hw);
1171
1172 /* wait for the FW to be ready */
1173 return ice_check_reset(hw);
1174 }
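
/* Illustrative usage sketch (not part of the driver): a PF-only reset during
 * initialization looks like the call in ice_init_hw() above:
 *
 *	status = ice_reset(hw, ICE_RESET_PFR);
 *
 * whereas ICE_RESET_CORER/ICE_RESET_GLOBR additionally require the rebuild
 * flow to call ice_clear_pxe_mode() once the AQ interface is restored, as
 * noted in the function description.
 */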
1175
1176 /**
1177 * ice_copy_rxq_ctx_to_hw
1178 * @hw: pointer to the hardware structure
1179 * @ice_rxq_ctx: pointer to the rxq context
1180 * @rxq_index: the index of the Rx queue
1181 *
1182 * Copies rxq context from dense structure to HW register space
1183 */
1184 static enum ice_status
1185 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1186 {
1187 u8 i;
1188
1189 if (!ice_rxq_ctx)
1190 return ICE_ERR_BAD_PTR;
1191
1192 if (rxq_index > QRX_CTRL_MAX_INDEX)
1193 return ICE_ERR_PARAM;
1194
1195 /* Copy each dword separately to HW */
1196 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1197 wr32(hw, QRX_CONTEXT(i, rxq_index),
1198 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1199
1200 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1201 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1202 }
1203
1204 return 0;
1205 }
1206
1207 /* LAN Rx Queue Context */
1208 static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
1209 /* Field Width LSB */
1210 ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
1211 ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
1212 ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
1213 ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
1214 ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102),
1215 ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109),
1216 ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114),
1217 ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116),
1218 ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117),
1219 ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119),
1220 ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120),
1221 ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124),
1222 ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127),
1223 ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174),
1224 ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193),
1225 ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194),
1226 ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
1227 ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
1228 ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
1229 ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
1230 { 0 }
1231 };
1232
1233 /**
1234 * ice_write_rxq_ctx
1235 * @hw: pointer to the hardware structure
1236 * @rlan_ctx: pointer to the rxq context
1237 * @rxq_index: the index of the Rx queue
1238 *
1239 * Converts the rxq context from the sparse to the dense structure, writes it
1240 * to HW register space, and enables the hardware to prefetch descriptors
1241 * instead of only fetching them on demand.
1242 */
1243 enum ice_status
1244 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1245 u32 rxq_index)
1246 {
1247 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1248
1249 if (!rlan_ctx)
1250 return ICE_ERR_BAD_PTR;
1251
1252 rlan_ctx->prefena = 1;
1253
1254 ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1255 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1256 }
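
/* Illustrative usage sketch (not part of the driver): a queue-setup path fills
 * the sparse context and lets ice_write_rxq_ctx() pack and program it; the
 * values and local names below are placeholders:
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> 7;		(assumes 128-byte aligned base)
 *	rlan_ctx.qlen = ring_count;
 *	rlan_ctx.dbuf = rx_buf_len >> 7;	(data buffer size in 128-byte units)
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */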
1257
1258 /* LAN Tx Queue Context */
1259 const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1260 /* Field Width LSB */
1261 ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
1262 ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
1263 ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60),
1264 ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65),
1265 ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68),
1266 ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
1267 ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
1268 ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
1269 ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91),
1270 ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
1271 ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
1272 ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
1273 ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102),
1274 ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103),
1275 ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104),
1276 ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105),
1277 ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114),
1278 ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128),
1279 ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129),
1280 ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
1281 ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148),
1282 ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152),
1283 ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153),
1284 ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164),
1285 ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
1286 ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
1287 ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
1288 ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
1289 { 0 }
1290 };
1291
1292 /* FW Admin Queue command wrappers */
1293
1294 /* Software lock/mutex that is meant to be held while the Global Config Lock
1295 * in firmware is acquired by the software to prevent most (but not all) types
1296 * of AQ commands from being sent to FW
1297 */
1298 DEFINE_MUTEX(ice_global_cfg_lock_sw);
1299
1300 /**
1301 * ice_should_retry_sq_send_cmd
1302 * @opcode: AQ opcode
1303 *
1304 * Decide if we should retry the send command routine for the ATQ, depending
1305 * on the opcode.
1306 */
1307 static bool ice_should_retry_sq_send_cmd(u16 opcode)
1308 {
1309 switch (opcode) {
1310 case ice_aqc_opc_get_link_topo:
1311 case ice_aqc_opc_lldp_stop:
1312 case ice_aqc_opc_lldp_start:
1313 case ice_aqc_opc_lldp_filter_ctrl:
1314 return true;
1315 }
1316
1317 return false;
1318 }
1319
1320 /**
1321 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
1322 * @hw: pointer to the HW struct
1323 * @cq: pointer to the specific Control queue
1324 * @desc: prefilled descriptor describing the command
1325 * @buf: buffer to use for indirect commands (or NULL for direct commands)
1326 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
1327 * @cd: pointer to command details structure
1328 *
1329 * Retry sending the FW Admin Queue command multiple times if the EBUSY AQ
1330 * error is returned.
1331 */
1332 static enum ice_status
1333 ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1334 struct ice_aq_desc *desc, void *buf, u16 buf_size,
1335 struct ice_sq_cd *cd)
1336 {
1337 struct ice_aq_desc desc_cpy;
1338 enum ice_status status;
1339 bool is_cmd_for_retry;
1340 u8 *buf_cpy = NULL;
1341 u8 idx = 0;
1342 u16 opcode;
1343
1344 opcode = le16_to_cpu(desc->opcode);
1345 is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
1346 memset(&desc_cpy, 0, sizeof(desc_cpy));
1347
1348 if (is_cmd_for_retry) {
1349 if (buf) {
1350 buf_cpy = kzalloc(buf_size, GFP_KERNEL);
1351 if (!buf_cpy)
1352 return ICE_ERR_NO_MEMORY;
1353 }
1354
1355 memcpy(&desc_cpy, desc, sizeof(desc_cpy));
1356 }
1357
1358 do {
1359 status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
1360
1361 if (!is_cmd_for_retry || !status ||
1362 hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
1363 break;
1364
1365 if (buf_cpy)
1366 memcpy(buf, buf_cpy, buf_size);
1367
1368 memcpy(desc, &desc_cpy, sizeof(desc_cpy));
1369
1370 mdelay(ICE_SQ_SEND_DELAY_TIME_MS);
1371
1372 } while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
1373
1374 kfree(buf_cpy);
1375
1376 return status;
1377 }
1378
1379 /**
1380 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1381 * @hw: pointer to the HW struct
1382 * @desc: descriptor describing the command
1383 * @buf: buffer to use for indirect commands (NULL for direct commands)
1384 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1385 * @cd: pointer to command details structure
1386 *
1387 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1388 */
1389 enum ice_status
1390 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1391 u16 buf_size, struct ice_sq_cd *cd)
1392 {
1393 struct ice_aqc_req_res *cmd = &desc->params.res_owner;
1394 bool lock_acquired = false;
1395 enum ice_status status;
1396
1397 /* When a package download is in process (i.e. when the firmware's
1398 * Global Configuration Lock resource is held), only the Download
1399 * Package, Get Version, Get Package Info List and Release Resource
1400 * (with resource ID set to Global Config Lock) AdminQ commands are
1401 * allowed; all others must block until the package download completes
1402 * and the Global Config Lock is released. See also
1403 * ice_acquire_global_cfg_lock().
1404 */
1405 switch (le16_to_cpu(desc->opcode)) {
1406 case ice_aqc_opc_download_pkg:
1407 case ice_aqc_opc_get_pkg_info_list:
1408 case ice_aqc_opc_get_ver:
1409 break;
1410 case ice_aqc_opc_release_res:
1411 if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
1412 break;
1413 fallthrough;
1414 default:
1415 mutex_lock(&ice_global_cfg_lock_sw);
1416 lock_acquired = true;
1417 break;
1418 }
1419
1420 status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
1421 if (lock_acquired)
1422 mutex_unlock(&ice_global_cfg_lock_sw);
1423
1424 return status;
1425 }
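
/* Illustrative usage sketch (not part of the driver): most AQ wrappers in this
 * file follow the same direct-command pattern, e.g.:
 *
 *	struct ice_aq_desc desc;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
 *	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
 */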
1426
1427 /**
1428 * ice_aq_get_fw_ver
1429 * @hw: pointer to the HW struct
1430 * @cd: pointer to command details structure or NULL
1431 *
1432 * Get the firmware version (0x0001) from the admin queue commands
1433 */
1434 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1435 {
1436 struct ice_aqc_get_ver *resp;
1437 struct ice_aq_desc desc;
1438 enum ice_status status;
1439
1440 resp = &desc.params.get_ver;
1441
1442 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1443
1444 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1445
1446 if (!status) {
1447 hw->fw_branch = resp->fw_branch;
1448 hw->fw_maj_ver = resp->fw_major;
1449 hw->fw_min_ver = resp->fw_minor;
1450 hw->fw_patch = resp->fw_patch;
1451 hw->fw_build = le32_to_cpu(resp->fw_build);
1452 hw->api_branch = resp->api_branch;
1453 hw->api_maj_ver = resp->api_major;
1454 hw->api_min_ver = resp->api_minor;
1455 hw->api_patch = resp->api_patch;
1456 }
1457
1458 return status;
1459 }
1460
1461 /**
1462 * ice_aq_send_driver_ver
1463 * @hw: pointer to the HW struct
1464 * @dv: driver's major, minor version
1465 * @cd: pointer to command details structure or NULL
1466 *
1467 * Send the driver version (0x0002) to the firmware
1468 */
1469 enum ice_status
1470 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1471 struct ice_sq_cd *cd)
1472 {
1473 struct ice_aqc_driver_ver *cmd;
1474 struct ice_aq_desc desc;
1475 u16 len;
1476
1477 cmd = &desc.params.driver_ver;
1478
1479 if (!dv)
1480 return ICE_ERR_PARAM;
1481
1482 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1483
1484 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1485 cmd->major_ver = dv->major_ver;
1486 cmd->minor_ver = dv->minor_ver;
1487 cmd->build_ver = dv->build_ver;
1488 cmd->subbuild_ver = dv->subbuild_ver;
1489
1490 len = 0;
1491 while (len < sizeof(dv->driver_string) &&
1492 isascii(dv->driver_string[len]) && dv->driver_string[len])
1493 len++;
1494
1495 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1496 }
1497
1498 /**
1499 * ice_aq_q_shutdown
1500 * @hw: pointer to the HW struct
1501 * @unloading: is the driver unloading itself
1502 *
1503 * Tell the Firmware that we're shutting down the AdminQ and whether
1504 * or not the driver is unloading as well (0x0003).
1505 */
1506 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1507 {
1508 struct ice_aqc_q_shutdown *cmd;
1509 struct ice_aq_desc desc;
1510
1511 cmd = &desc.params.q_shutdown;
1512
1513 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1514
1515 if (unloading)
1516 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1517
1518 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1519 }
1520
1521 /**
1522 * ice_aq_req_res
1523 * @hw: pointer to the HW struct
1524 * @res: resource ID
1525 * @access: access type
1526 * @sdp_number: resource number
1527 * @timeout: the maximum time in ms that the driver may hold the resource
1528 * @cd: pointer to command details structure or NULL
1529 *
1530 * Requests common resource using the admin queue commands (0x0008).
1531 * When attempting to acquire the Global Config Lock, the driver can
1532 * learn of three states:
1533 * 1) ICE_SUCCESS - acquired lock, and can perform download package
1534 * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
1535 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1536 * successfully downloaded the package; the driver does
1537 * not have to download the package and can continue
1538 * loading
1539 *
1540 * Note that if the caller is in an acquire-lock, perform-action, release-lock
1541 * phase of operation, it is possible that the FW may detect a timeout and issue
1542 * a CORER. In this case, the driver will receive a CORER interrupt and will
1543 * have to determine its cause. The calling thread that is handling this flow
1544 * will likely get an error propagated back to it indicating the Download
1545 * Package, Update Package or the Release Resource AQ commands timed out.
1546 */
1547 static enum ice_status
1548 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1549 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1550 struct ice_sq_cd *cd)
1551 {
1552 struct ice_aqc_req_res *cmd_resp;
1553 struct ice_aq_desc desc;
1554 enum ice_status status;
1555
1556 cmd_resp = &desc.params.res_owner;
1557
1558 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1559
1560 cmd_resp->res_id = cpu_to_le16(res);
1561 cmd_resp->access_type = cpu_to_le16(access);
1562 cmd_resp->res_number = cpu_to_le32(sdp_number);
1563 cmd_resp->timeout = cpu_to_le32(*timeout);
1564 *timeout = 0;
1565
1566 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1567
1568 /* The completion specifies the maximum time in ms that the driver
1569 * may hold the resource in the Timeout field.
1570 */
1571
1572 /* Global config lock response utilizes an additional status field.
1573 *
1574 * If the Global config lock resource is held by some other driver, the
1575 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1576 * and the timeout field indicates the maximum time the current owner
1577 * of the resource has to free it.
1578 */
1579 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1580 if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1581 *timeout = le32_to_cpu(cmd_resp->timeout);
1582 return 0;
1583 } else if (le16_to_cpu(cmd_resp->status) ==
1584 ICE_AQ_RES_GLBL_IN_PROG) {
1585 *timeout = le32_to_cpu(cmd_resp->timeout);
1586 return ICE_ERR_AQ_ERROR;
1587 } else if (le16_to_cpu(cmd_resp->status) ==
1588 ICE_AQ_RES_GLBL_DONE) {
1589 return ICE_ERR_AQ_NO_WORK;
1590 }
1591
1592 /* invalid FW response, force a timeout immediately */
1593 *timeout = 0;
1594 return ICE_ERR_AQ_ERROR;
1595 }
1596
1597 /* If the resource is held by some other driver, the command completes
1598 * with a busy return value and the timeout field indicates the maximum
1599 * time the current owner of the resource has to free it.
1600 */
1601 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1602 *timeout = le32_to_cpu(cmd_resp->timeout);
1603
1604 return status;
1605 }
1606
1607 /**
1608 * ice_aq_release_res
1609 * @hw: pointer to the HW struct
1610 * @res: resource ID
1611 * @sdp_number: resource number
1612 * @cd: pointer to command details structure or NULL
1613 *
1614 * release common resource using the admin queue commands (0x0009)
1615 */
1616 static enum ice_status
1617 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1618 struct ice_sq_cd *cd)
1619 {
1620 struct ice_aqc_req_res *cmd;
1621 struct ice_aq_desc desc;
1622
1623 cmd = &desc.params.res_owner;
1624
1625 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1626
1627 cmd->res_id = cpu_to_le16(res);
1628 cmd->res_number = cpu_to_le32(sdp_number);
1629
1630 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1631 }
1632
1633 /**
1634 * ice_acquire_res
1635 * @hw: pointer to the HW structure
1636 * @res: resource ID
1637 * @access: access type (read or write)
1638 * @timeout: timeout in milliseconds
1639 *
1640 * This function will attempt to acquire the ownership of a resource.
1641 */
1642 enum ice_status
1643 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1644 enum ice_aq_res_access_type access, u32 timeout)
1645 {
1646 #define ICE_RES_POLLING_DELAY_MS 10
1647 u32 delay = ICE_RES_POLLING_DELAY_MS;
1648 u32 time_left = timeout;
1649 enum ice_status status;
1650
1651 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1652
1653 /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
1654 * previously acquired the resource and performed any necessary updates;
1655 * in this case the caller does not obtain the resource and has no
1656 * further work to do.
1657 */
1658 if (status == ICE_ERR_AQ_NO_WORK)
1659 goto ice_acquire_res_exit;
1660
1661 if (status)
1662 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
1663
1664 	/* If necessary, poll until the current lock owner times out */
1665 timeout = time_left;
1666 while (status && timeout && time_left) {
1667 mdelay(delay);
1668 timeout = (timeout > delay) ? timeout - delay : 0;
1669 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1670
1671 if (status == ICE_ERR_AQ_NO_WORK)
1672 /* lock free, but no work to do */
1673 break;
1674
1675 if (!status)
1676 /* lock acquired */
1677 break;
1678 }
1679 if (status && status != ICE_ERR_AQ_NO_WORK)
1680 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1681
1682 ice_acquire_res_exit:
1683 if (status == ICE_ERR_AQ_NO_WORK) {
1684 if (access == ICE_RES_WRITE)
1685 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
1686 else
1687 ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
1688 }
1689 return status;
1690 }
1691
1692 /**
1693 * ice_release_res
1694 * @hw: pointer to the HW structure
1695 * @res: resource ID
1696 *
1697 * This function will release a resource using the proper Admin Command.
1698 */
1699 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1700 {
1701 enum ice_status status;
1702 u32 total_delay = 0;
1703
1704 status = ice_aq_release_res(hw, res, 0, NULL);
1705
1706 /* there are some rare cases when trying to release the resource
1707 * results in an admin queue timeout, so handle them correctly
1708 */
1709 while ((status == ICE_ERR_AQ_TIMEOUT) &&
1710 (total_delay < hw->adminq.sq_cmd_timeout)) {
1711 mdelay(1);
1712 status = ice_aq_release_res(hw, res, 0, NULL);
1713 total_delay++;
1714 }
1715 }
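
/* Usage sketch (illustration only, not part of the driver): a hypothetical
 * caller taking the NVM ownership lock for read access and releasing it when
 * done. The ICE_NVM_RES_ID / ICE_RES_READ values come from the resource
 * enums used by these helpers; the 3000 ms timeout is an arbitrary value
 * chosen for the example, not one mandated by this file.
 *
 *	enum ice_status status;
 *
 *	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ, 3000);
 *	if (status == ICE_ERR_AQ_NO_WORK)
 *		return 0;		// another PF already did the work
 *	if (status)
 *		return status;		// could not get ownership in time
 *
 *	// ... access the resource here ...
 *
 *	ice_release_res(hw, ICE_NVM_RES_ID);
 */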
1716
1717 /**
1718 * ice_aq_alloc_free_res - command to allocate/free resources
1719 * @hw: pointer to the HW struct
1720 * @num_entries: number of resource entries in buffer
1721 * @buf: Indirect buffer to hold data parameters and response
1722 * @buf_size: size of buffer for indirect commands
1723 * @opc: pass in the command opcode
1724 * @cd: pointer to command details structure or NULL
1725 *
1726 * Helper function to allocate/free resources using the admin queue commands
1727 */
1728 enum ice_status
1729 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
1730 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1731 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1732 {
1733 struct ice_aqc_alloc_free_res_cmd *cmd;
1734 struct ice_aq_desc desc;
1735
1736 cmd = &desc.params.sw_res_ctrl;
1737
1738 if (!buf)
1739 return ICE_ERR_PARAM;
1740
1741 if (buf_size < flex_array_size(buf, elem, num_entries))
1742 return ICE_ERR_PARAM;
1743
1744 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1745
1746 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1747
1748 cmd->num_entries = cpu_to_le16(num_entries);
1749
1750 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1751 }
1752
1753 /**
1754 * ice_alloc_hw_res - allocate resource
1755 * @hw: pointer to the HW struct
1756 * @type: type of resource
1757 * @num: number of resources to allocate
1758 * @btm: allocate from bottom
1759 * @res: pointer to array that will receive the resources
1760 */
1761 enum ice_status
1762 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1763 {
1764 struct ice_aqc_alloc_free_res_elem *buf;
1765 enum ice_status status;
1766 u16 buf_len;
1767
1768 buf_len = struct_size(buf, elem, num);
1769 buf = kzalloc(buf_len, GFP_KERNEL);
1770 if (!buf)
1771 return ICE_ERR_NO_MEMORY;
1772
1773 /* Prepare buffer to allocate resource. */
1774 buf->num_elems = cpu_to_le16(num);
1775 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1776 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1777 if (btm)
1778 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1779
1780 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1781 ice_aqc_opc_alloc_res, NULL);
1782 if (status)
1783 goto ice_alloc_res_exit;
1784
1785 memcpy(res, buf->elem, sizeof(*buf->elem) * num);
1786
1787 ice_alloc_res_exit:
1788 kfree(buf);
1789 return status;
1790 }
1791
1792 /**
1793 * ice_free_hw_res - free allocated HW resource
1794 * @hw: pointer to the HW struct
1795 * @type: type of resource to free
1796 * @num: number of resources
1797 * @res: pointer to array that contains the resources to free
1798 */
1799 enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1800 {
1801 struct ice_aqc_alloc_free_res_elem *buf;
1802 enum ice_status status;
1803 u16 buf_len;
1804
1805 buf_len = struct_size(buf, elem, num);
1806 buf = kzalloc(buf_len, GFP_KERNEL);
1807 if (!buf)
1808 return ICE_ERR_NO_MEMORY;
1809
1810 /* Prepare buffer to free resource. */
1811 buf->num_elems = cpu_to_le16(num);
1812 buf->res_type = cpu_to_le16(type);
1813 memcpy(buf->elem, res, sizeof(*buf->elem) * num);
1814
1815 status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
1816 ice_aqc_opc_free_res, NULL);
1817 if (status)
1818 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1819
1820 kfree(buf);
1821 return status;
1822 }
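
/* Usage sketch (illustration only): allocating and later freeing a single
 * hardware resource of some type. "type" below is a placeholder; real callers
 * pass one of the ICE_AQC_RES_TYPE_* values, and ice_alloc_hw_res() ORs in
 * the dedicated/ignore-index flags itself.
 *
 *	enum ice_status status;
 *	u16 res_id;
 *
 *	status = ice_alloc_hw_res(hw, type, 1, false, &res_id);
 *	if (status)
 *		return status;
 *
 *	// ... use the allocated resource index in res_id ...
 *
 *	status = ice_free_hw_res(hw, type, 1, &res_id);
 */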
1823
1824 /**
1825 * ice_get_num_per_func - determine number of resources per PF
1826 * @hw: pointer to the HW structure
1827 * @max: value to be evenly split between each PF
1828 *
1829 * Determine the number of valid functions by going through the bitmap returned
1830 * from parsing capabilities and use this to calculate the number of resources
1831 * per PF based on the max value passed in.
1832 */
1833 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1834 {
1835 u8 funcs;
1836
1837 #define ICE_CAPS_VALID_FUNCS_M 0xFF
1838 funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
1839 ICE_CAPS_VALID_FUNCS_M);
1840
1841 if (!funcs)
1842 return 0;
1843
1844 return max / funcs;
1845 }
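
/* Worked example for ice_get_num_per_func(): if the valid functions bitmap
 * is 0x0F, hweight8() reports four enabled PFs, so a device-wide maximum of,
 * say, 768 VSIs yields 768 / 4 = 192 per PF. With an empty bitmap the
 * function returns 0 instead of dividing by zero.
 */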
1846
1847 /**
1848 * ice_parse_common_caps - parse common device/function capabilities
1849 * @hw: pointer to the HW struct
1850 * @caps: pointer to common capabilities structure
1851 * @elem: the capability element to parse
1852 * @prefix: message prefix for tracing capabilities
1853 *
1854 * Given a capability element, extract relevant details into the common
1855 * capability structure.
1856 *
1857 * Returns: true if the capability matches one of the common capability ids,
1858 * false otherwise.
1859 */
1860 static bool
1861 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1862 struct ice_aqc_list_caps_elem *elem, const char *prefix)
1863 {
1864 u32 logical_id = le32_to_cpu(elem->logical_id);
1865 u32 phys_id = le32_to_cpu(elem->phys_id);
1866 u32 number = le32_to_cpu(elem->number);
1867 u16 cap = le16_to_cpu(elem->cap);
1868 bool found = true;
1869
1870 switch (cap) {
1871 case ICE_AQC_CAPS_VALID_FUNCTIONS:
1872 caps->valid_functions = number;
1873 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
1874 caps->valid_functions);
1875 break;
1876 case ICE_AQC_CAPS_SRIOV:
1877 caps->sr_iov_1_1 = (number == 1);
1878 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
1879 caps->sr_iov_1_1);
1880 break;
1881 case ICE_AQC_CAPS_DCB:
1882 caps->dcb = (number == 1);
1883 caps->active_tc_bitmap = logical_id;
1884 caps->maxtc = phys_id;
1885 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
1886 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
1887 caps->active_tc_bitmap);
1888 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
1889 break;
1890 case ICE_AQC_CAPS_RSS:
1891 caps->rss_table_size = number;
1892 caps->rss_table_entry_width = logical_id;
1893 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
1894 caps->rss_table_size);
1895 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
1896 caps->rss_table_entry_width);
1897 break;
1898 case ICE_AQC_CAPS_RXQS:
1899 caps->num_rxq = number;
1900 caps->rxq_first_id = phys_id;
1901 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
1902 caps->num_rxq);
1903 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
1904 caps->rxq_first_id);
1905 break;
1906 case ICE_AQC_CAPS_TXQS:
1907 caps->num_txq = number;
1908 caps->txq_first_id = phys_id;
1909 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
1910 caps->num_txq);
1911 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
1912 caps->txq_first_id);
1913 break;
1914 case ICE_AQC_CAPS_MSIX:
1915 caps->num_msix_vectors = number;
1916 caps->msix_vector_first_id = phys_id;
1917 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
1918 caps->num_msix_vectors);
1919 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
1920 caps->msix_vector_first_id);
1921 break;
1922 case ICE_AQC_CAPS_PENDING_NVM_VER:
1923 caps->nvm_update_pending_nvm = true;
1924 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
1925 break;
1926 case ICE_AQC_CAPS_PENDING_OROM_VER:
1927 caps->nvm_update_pending_orom = true;
1928 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
1929 break;
1930 case ICE_AQC_CAPS_PENDING_NET_VER:
1931 caps->nvm_update_pending_netlist = true;
1932 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
1933 break;
1934 case ICE_AQC_CAPS_NVM_MGMT:
1935 caps->nvm_unified_update =
1936 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
1937 true : false;
1938 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
1939 caps->nvm_unified_update);
1940 break;
1941 case ICE_AQC_CAPS_MAX_MTU:
1942 caps->max_mtu = number;
1943 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
1944 prefix, caps->max_mtu);
1945 break;
1946 default:
1947 /* Not one of the recognized common capabilities */
1948 found = false;
1949 }
1950
1951 return found;
1952 }
1953
1954 /**
1955 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
1956 * @hw: pointer to the HW structure
1957 * @caps: pointer to capabilities structure to fix
1958 *
1959 * Re-calculate the capabilities that are dependent on the number of physical
1960 * ports; i.e. some features are not supported or function differently on
1961 * devices with more than 4 ports.
1962 */
1963 static void
1964 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
1965 {
1966 /* This assumes device capabilities are always scanned before function
1967 * capabilities during the initialization flow.
1968 */
1969 if (hw->dev_caps.num_funcs > 4) {
1970 /* Max 4 TCs per port */
1971 caps->maxtc = 4;
1972 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
1973 caps->maxtc);
1974 }
1975 }
1976
1977 /**
1978 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
1979 * @hw: pointer to the HW struct
1980 * @func_p: pointer to function capabilities structure
1981 * @cap: pointer to the capability element to parse
1982 *
1983 * Extract function capabilities for ICE_AQC_CAPS_VF.
1984 */
1985 static void
1986 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
1987 struct ice_aqc_list_caps_elem *cap)
1988 {
1989 u32 logical_id = le32_to_cpu(cap->logical_id);
1990 u32 number = le32_to_cpu(cap->number);
1991
1992 func_p->num_allocd_vfs = number;
1993 func_p->vf_base_id = logical_id;
1994 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
1995 func_p->num_allocd_vfs);
1996 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
1997 func_p->vf_base_id);
1998 }
1999
2000 /**
2001 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2002 * @hw: pointer to the HW struct
2003 * @func_p: pointer to function capabilities structure
2004 * @cap: pointer to the capability element to parse
2005 *
2006 * Extract function capabilities for ICE_AQC_CAPS_VSI.
2007 */
2008 static void
2009 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2010 struct ice_aqc_list_caps_elem *cap)
2011 {
2012 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2013 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2014 le32_to_cpu(cap->number));
2015 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2016 func_p->guar_num_vsi);
2017 }
2018
2019 /**
2020 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
2021 * @hw: pointer to the HW struct
2022 * @func_p: pointer to function capabilities structure
2023 *
2024 * Extract function capabilities for ICE_AQC_CAPS_FD.
2025 */
2026 static void
2027 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
2028 {
2029 u32 reg_val, val;
2030
2031 reg_val = rd32(hw, GLQF_FD_SIZE);
2032 val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
2033 GLQF_FD_SIZE_FD_GSIZE_S;
2034 func_p->fd_fltr_guar =
2035 ice_get_num_per_func(hw, val);
2036 val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
2037 GLQF_FD_SIZE_FD_BSIZE_S;
2038 func_p->fd_fltr_best_effort = val;
2039
2040 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
2041 func_p->fd_fltr_guar);
2042 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
2043 func_p->fd_fltr_best_effort);
2044 }
2045
2046 /**
2047 * ice_parse_func_caps - Parse function capabilities
2048 * @hw: pointer to the HW struct
2049 * @func_p: pointer to function capabilities structure
2050 * @buf: buffer containing the function capability records
2051 * @cap_count: the number of capabilities
2052 *
2053  * Helper function to parse the function (0x000A) capabilities list. For
2054 * capabilities shared between device and function, this relies on
2055 * ice_parse_common_caps.
2056 *
2057 * Loop through the list of provided capabilities and extract the relevant
2058  * data into the function capabilities structure.
2059 */
2060 static void
2061 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2062 void *buf, u32 cap_count)
2063 {
2064 struct ice_aqc_list_caps_elem *cap_resp;
2065 u32 i;
2066
2067 cap_resp = buf;
2068
2069 memset(func_p, 0, sizeof(*func_p));
2070
2071 for (i = 0; i < cap_count; i++) {
2072 u16 cap = le16_to_cpu(cap_resp[i].cap);
2073 bool found;
2074
2075 found = ice_parse_common_caps(hw, &func_p->common_cap,
2076 &cap_resp[i], "func caps");
2077
2078 switch (cap) {
2079 case ICE_AQC_CAPS_VF:
2080 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2081 break;
2082 case ICE_AQC_CAPS_VSI:
2083 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2084 break;
2085 case ICE_AQC_CAPS_FD:
2086 ice_parse_fdir_func_caps(hw, func_p);
2087 break;
2088 default:
2089 /* Don't list common capabilities as unknown */
2090 if (!found)
2091 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2092 i, cap);
2093 break;
2094 }
2095 }
2096
2097 ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2098 }
2099
2100 /**
2101 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2102 * @hw: pointer to the HW struct
2103 * @dev_p: pointer to device capabilities structure
2104 * @cap: capability element to parse
2105 *
2106 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2107 */
2108 static void
2109 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2110 struct ice_aqc_list_caps_elem *cap)
2111 {
2112 u32 number = le32_to_cpu(cap->number);
2113
2114 dev_p->num_funcs = hweight32(number);
2115 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2116 dev_p->num_funcs);
2117 }
2118
2119 /**
2120 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2121 * @hw: pointer to the HW struct
2122 * @dev_p: pointer to device capabilities structure
2123 * @cap: capability element to parse
2124 *
2125 * Parse ICE_AQC_CAPS_VF for device capabilities.
2126 */
2127 static void
2128 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2129 struct ice_aqc_list_caps_elem *cap)
2130 {
2131 u32 number = le32_to_cpu(cap->number);
2132
2133 dev_p->num_vfs_exposed = number;
2134 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2135 dev_p->num_vfs_exposed);
2136 }
2137
2138 /**
2139 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2140 * @hw: pointer to the HW struct
2141 * @dev_p: pointer to device capabilities structure
2142 * @cap: capability element to parse
2143 *
2144 * Parse ICE_AQC_CAPS_VSI for device capabilities.
2145 */
2146 static void
2147 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2148 struct ice_aqc_list_caps_elem *cap)
2149 {
2150 u32 number = le32_to_cpu(cap->number);
2151
2152 dev_p->num_vsi_allocd_to_host = number;
2153 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2154 dev_p->num_vsi_allocd_to_host);
2155 }
2156
2157 /**
2158 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
2159 * @hw: pointer to the HW struct
2160 * @dev_p: pointer to device capabilities structure
2161 * @cap: capability element to parse
2162 *
2163 * Parse ICE_AQC_CAPS_FD for device capabilities.
2164 */
2165 static void
2166 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2167 struct ice_aqc_list_caps_elem *cap)
2168 {
2169 u32 number = le32_to_cpu(cap->number);
2170
2171 dev_p->num_flow_director_fltr = number;
2172 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2173 dev_p->num_flow_director_fltr);
2174 }
2175
2176 /**
2177 * ice_parse_dev_caps - Parse device capabilities
2178 * @hw: pointer to the HW struct
2179 * @dev_p: pointer to device capabilities structure
2180 * @buf: buffer containing the device capability records
2181 * @cap_count: the number of capabilities
2182 *
2183  * Helper function to parse the device (0x000B) capabilities list. For
2184 * capabilities shared between device and function, this relies on
2185 * ice_parse_common_caps.
2186 *
2187 * Loop through the list of provided capabilities and extract the relevant
2188  * data into the device capabilities structure.
2189 */
2190 static void
2191 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2192 void *buf, u32 cap_count)
2193 {
2194 struct ice_aqc_list_caps_elem *cap_resp;
2195 u32 i;
2196
2197 cap_resp = buf;
2198
2199 memset(dev_p, 0, sizeof(*dev_p));
2200
2201 for (i = 0; i < cap_count; i++) {
2202 u16 cap = le16_to_cpu(cap_resp[i].cap);
2203 bool found;
2204
2205 found = ice_parse_common_caps(hw, &dev_p->common_cap,
2206 &cap_resp[i], "dev caps");
2207
2208 switch (cap) {
2209 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2210 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2211 break;
2212 case ICE_AQC_CAPS_VF:
2213 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2214 break;
2215 case ICE_AQC_CAPS_VSI:
2216 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2217 break;
2218 case ICE_AQC_CAPS_FD:
2219 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
2220 break;
2221 default:
2222 /* Don't list common capabilities as unknown */
2223 if (!found)
2224 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2225 i, cap);
2226 break;
2227 }
2228 }
2229
2230 ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2231 }
2232
2233 /**
2234 * ice_aq_list_caps - query function/device capabilities
2235 * @hw: pointer to the HW struct
2236 * @buf: a buffer to hold the capabilities
2237 * @buf_size: size of the buffer
2238 * @cap_count: if not NULL, set to the number of capabilities reported
2239 * @opc: capabilities type to discover, device or function
2240 * @cd: pointer to command details structure or NULL
2241 *
2242 * Get the function (0x000A) or device (0x000B) capabilities description from
2243 * firmware and store it in the buffer.
2244 *
2245 * If the cap_count pointer is not NULL, then it is set to the number of
2246 * capabilities firmware will report. Note that if the buffer size is too
2247 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2248 * cap_count will still be updated in this case. It is recommended that the
2249 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2250 * firmware could return) to avoid this.
2251 */
2252 enum ice_status
2253 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2254 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2255 {
2256 struct ice_aqc_list_caps *cmd;
2257 struct ice_aq_desc desc;
2258 enum ice_status status;
2259
2260 cmd = &desc.params.get_cap;
2261
2262 if (opc != ice_aqc_opc_list_func_caps &&
2263 opc != ice_aqc_opc_list_dev_caps)
2264 return ICE_ERR_PARAM;
2265
2266 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2267 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2268
2269 if (cap_count)
2270 *cap_count = le32_to_cpu(cmd->count);
2271
2272 return status;
2273 }
2274
2275 /**
2276 * ice_discover_dev_caps - Read and extract device capabilities
2277 * @hw: pointer to the hardware structure
2278 * @dev_caps: pointer to device capabilities structure
2279 *
2280 * Read the device capabilities and extract them into the dev_caps structure
2281 * for later use.
2282 */
2283 enum ice_status
2284 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2285 {
2286 enum ice_status status;
2287 u32 cap_count = 0;
2288 void *cbuf;
2289
2290 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2291 if (!cbuf)
2292 return ICE_ERR_NO_MEMORY;
2293
2294 /* Although the driver doesn't know the number of capabilities the
2295 * device will return, we can simply send a 4KB buffer, the maximum
2296 * possible size that firmware can return.
2297 */
2298 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2299
2300 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2301 ice_aqc_opc_list_dev_caps, NULL);
2302 if (!status)
2303 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2304 kfree(cbuf);
2305
2306 return status;
2307 }
2308
2309 /**
2310 * ice_discover_func_caps - Read and extract function capabilities
2311 * @hw: pointer to the hardware structure
2312 * @func_caps: pointer to function capabilities structure
2313 *
2314 * Read the function capabilities and extract them into the func_caps structure
2315 * for later use.
2316 */
2317 static enum ice_status
2318 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2319 {
2320 enum ice_status status;
2321 u32 cap_count = 0;
2322 void *cbuf;
2323
2324 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2325 if (!cbuf)
2326 return ICE_ERR_NO_MEMORY;
2327
2328 /* Although the driver doesn't know the number of capabilities the
2329 * device will return, we can simply send a 4KB buffer, the maximum
2330 * possible size that firmware can return.
2331 */
2332 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2333
2334 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2335 ice_aqc_opc_list_func_caps, NULL);
2336 if (!status)
2337 ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2338 kfree(cbuf);
2339
2340 return status;
2341 }
2342
2343 /**
2344 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2345 * @hw: pointer to the hardware structure
2346 */
2347 void ice_set_safe_mode_caps(struct ice_hw *hw)
2348 {
2349 struct ice_hw_func_caps *func_caps = &hw->func_caps;
2350 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2351 struct ice_hw_common_caps cached_caps;
2352 u32 num_funcs;
2353
2354 /* cache some func_caps values that should be restored after memset */
2355 cached_caps = func_caps->common_cap;
2356
2357 /* unset func capabilities */
2358 memset(func_caps, 0, sizeof(*func_caps));
2359
2360 #define ICE_RESTORE_FUNC_CAP(name) \
2361 func_caps->common_cap.name = cached_caps.name
2362
2363 /* restore cached values */
2364 ICE_RESTORE_FUNC_CAP(valid_functions);
2365 ICE_RESTORE_FUNC_CAP(txq_first_id);
2366 ICE_RESTORE_FUNC_CAP(rxq_first_id);
2367 ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2368 ICE_RESTORE_FUNC_CAP(max_mtu);
2369 ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2370 ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm);
2371 ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom);
2372 ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist);
2373
2374 /* one Tx and one Rx queue in safe mode */
2375 func_caps->common_cap.num_rxq = 1;
2376 func_caps->common_cap.num_txq = 1;
2377
2378 /* two MSIX vectors, one for traffic and one for misc causes */
2379 func_caps->common_cap.num_msix_vectors = 2;
2380 func_caps->guar_num_vsi = 1;
2381
2382 /* cache some dev_caps values that should be restored after memset */
2383 cached_caps = dev_caps->common_cap;
2384 num_funcs = dev_caps->num_funcs;
2385
2386 /* unset dev capabilities */
2387 memset(dev_caps, 0, sizeof(*dev_caps));
2388
2389 #define ICE_RESTORE_DEV_CAP(name) \
2390 dev_caps->common_cap.name = cached_caps.name
2391
2392 /* restore cached values */
2393 ICE_RESTORE_DEV_CAP(valid_functions);
2394 ICE_RESTORE_DEV_CAP(txq_first_id);
2395 ICE_RESTORE_DEV_CAP(rxq_first_id);
2396 ICE_RESTORE_DEV_CAP(msix_vector_first_id);
2397 ICE_RESTORE_DEV_CAP(max_mtu);
2398 ICE_RESTORE_DEV_CAP(nvm_unified_update);
2399 ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm);
2400 ICE_RESTORE_DEV_CAP(nvm_update_pending_orom);
2401 ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist);
2402 dev_caps->num_funcs = num_funcs;
2403
2404 /* one Tx and one Rx queue per function in safe mode */
2405 dev_caps->common_cap.num_rxq = num_funcs;
2406 dev_caps->common_cap.num_txq = num_funcs;
2407
2408 /* two MSIX vectors per function */
2409 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2410 }
2411
2412 /**
2413 * ice_get_caps - get info about the HW
2414 * @hw: pointer to the hardware structure
2415 */
2416 enum ice_status ice_get_caps(struct ice_hw *hw)
2417 {
2418 enum ice_status status;
2419
2420 status = ice_discover_dev_caps(hw, &hw->dev_caps);
2421 if (status)
2422 return status;
2423
2424 return ice_discover_func_caps(hw, &hw->func_caps);
2425 }
2426
2427 /**
2428 * ice_aq_manage_mac_write - manage MAC address write command
2429 * @hw: pointer to the HW struct
2430 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2431 * @flags: flags to control write behavior
2432 * @cd: pointer to command details structure or NULL
2433 *
2434 * This function is used to write MAC address to the NVM (0x0108).
2435 */
2436 enum ice_status
2437 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2438 struct ice_sq_cd *cd)
2439 {
2440 struct ice_aqc_manage_mac_write *cmd;
2441 struct ice_aq_desc desc;
2442
2443 cmd = &desc.params.mac_write;
2444 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2445
2446 cmd->flags = flags;
2447 ether_addr_copy(cmd->mac_addr, mac_addr);
2448
2449 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2450 }
2451
2452 /**
2453 * ice_aq_clear_pxe_mode
2454 * @hw: pointer to the HW struct
2455 *
2456 * Tell the firmware that the driver is taking over from PXE (0x0110).
2457 */
2458 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2459 {
2460 struct ice_aq_desc desc;
2461
2462 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2463 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2464
2465 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2466 }
2467
2468 /**
2469 * ice_clear_pxe_mode - clear pxe operations mode
2470 * @hw: pointer to the HW struct
2471 *
2472 * Make sure all PXE mode settings are cleared, including things
2473 * like descriptor fetch/write-back mode.
2474 */
2475 void ice_clear_pxe_mode(struct ice_hw *hw)
2476 {
2477 if (ice_check_sq_alive(hw, &hw->adminq))
2478 ice_aq_clear_pxe_mode(hw);
2479 }
2480
2481 /**
2482 * ice_get_link_speed_based_on_phy_type - returns link speed
2483 * @phy_type_low: lower part of phy_type
2484 * @phy_type_high: higher part of phy_type
2485 *
2486 * This helper function will convert an entry in PHY type structure
2487 * [phy_type_low, phy_type_high] to its corresponding link speed.
2488 * Note: In the structure of [phy_type_low, phy_type_high], there should
2489 * be one bit set, as this function will convert one PHY type to its
2490 * speed.
2491 * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
2492 * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
2493 */
2494 static u16
2495 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2496 {
2497 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2498 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2499
2500 switch (phy_type_low) {
2501 case ICE_PHY_TYPE_LOW_100BASE_TX:
2502 case ICE_PHY_TYPE_LOW_100M_SGMII:
2503 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2504 break;
2505 case ICE_PHY_TYPE_LOW_1000BASE_T:
2506 case ICE_PHY_TYPE_LOW_1000BASE_SX:
2507 case ICE_PHY_TYPE_LOW_1000BASE_LX:
2508 case ICE_PHY_TYPE_LOW_1000BASE_KX:
2509 case ICE_PHY_TYPE_LOW_1G_SGMII:
2510 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2511 break;
2512 case ICE_PHY_TYPE_LOW_2500BASE_T:
2513 case ICE_PHY_TYPE_LOW_2500BASE_X:
2514 case ICE_PHY_TYPE_LOW_2500BASE_KX:
2515 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2516 break;
2517 case ICE_PHY_TYPE_LOW_5GBASE_T:
2518 case ICE_PHY_TYPE_LOW_5GBASE_KR:
2519 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2520 break;
2521 case ICE_PHY_TYPE_LOW_10GBASE_T:
2522 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2523 case ICE_PHY_TYPE_LOW_10GBASE_SR:
2524 case ICE_PHY_TYPE_LOW_10GBASE_LR:
2525 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2526 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2527 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2528 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2529 break;
2530 case ICE_PHY_TYPE_LOW_25GBASE_T:
2531 case ICE_PHY_TYPE_LOW_25GBASE_CR:
2532 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2533 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2534 case ICE_PHY_TYPE_LOW_25GBASE_SR:
2535 case ICE_PHY_TYPE_LOW_25GBASE_LR:
2536 case ICE_PHY_TYPE_LOW_25GBASE_KR:
2537 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2538 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2539 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2540 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2541 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2542 break;
2543 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2544 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2545 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2546 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2547 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2548 case ICE_PHY_TYPE_LOW_40G_XLAUI:
2549 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2550 break;
2551 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2552 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2553 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2554 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2555 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2556 case ICE_PHY_TYPE_LOW_50G_LAUI2:
2557 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2558 case ICE_PHY_TYPE_LOW_50G_AUI2:
2559 case ICE_PHY_TYPE_LOW_50GBASE_CP:
2560 case ICE_PHY_TYPE_LOW_50GBASE_SR:
2561 case ICE_PHY_TYPE_LOW_50GBASE_FR:
2562 case ICE_PHY_TYPE_LOW_50GBASE_LR:
2563 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2564 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2565 case ICE_PHY_TYPE_LOW_50G_AUI1:
2566 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2567 break;
2568 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2569 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2570 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2571 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2572 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2573 case ICE_PHY_TYPE_LOW_100G_CAUI4:
2574 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2575 case ICE_PHY_TYPE_LOW_100G_AUI4:
2576 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2577 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2578 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2579 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2580 case ICE_PHY_TYPE_LOW_100GBASE_DR:
2581 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2582 break;
2583 default:
2584 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2585 break;
2586 }
2587
2588 switch (phy_type_high) {
2589 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2590 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2591 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2592 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2593 case ICE_PHY_TYPE_HIGH_100G_AUI2:
2594 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2595 break;
2596 default:
2597 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2598 break;
2599 }
2600
2601 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2602 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2603 return ICE_AQ_LINK_SPEED_UNKNOWN;
2604 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2605 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2606 return ICE_AQ_LINK_SPEED_UNKNOWN;
2607 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2608 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2609 return speed_phy_type_low;
2610 else
2611 return speed_phy_type_high;
2612 }
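
/* Example of the one-bit rule described above: passing
 * (ICE_PHY_TYPE_LOW_10GBASE_T, 0) yields ICE_AQ_LINK_SPEED_10GB, while
 * passing (0, 0), or a pair with bits set in both the low and high halves,
 * yields ICE_AQ_LINK_SPEED_UNKNOWN.
 */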
2613
2614 /**
2615 * ice_update_phy_type
2616 * @phy_type_low: pointer to the lower part of phy_type
2617 * @phy_type_high: pointer to the higher part of phy_type
2618 * @link_speeds_bitmap: targeted link speeds bitmap
2619 *
2620  * Note: For the link_speeds_bitmap layout, see
2621  * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
2622  * link_speeds_bitmap that includes multiple speeds.
2623 *
2624  * Each entry in the [phy_type_low, phy_type_high] structure represents a
2625  * certain link speed. This helper function turns on the bits in the
2626  * [phy_type_low, phy_type_high] structure that correspond to the speeds
2627  * set in the link_speeds_bitmap input parameter.
2628 */
2629 void
2630 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2631 u16 link_speeds_bitmap)
2632 {
2633 u64 pt_high;
2634 u64 pt_low;
2635 int index;
2636 u16 speed;
2637
2638 /* We first check with low part of phy_type */
2639 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2640 pt_low = BIT_ULL(index);
2641 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2642
2643 if (link_speeds_bitmap & speed)
2644 *phy_type_low |= BIT_ULL(index);
2645 }
2646
2647 /* We then check with high part of phy_type */
2648 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2649 pt_high = BIT_ULL(index);
2650 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2651
2652 if (link_speeds_bitmap & speed)
2653 *phy_type_high |= BIT_ULL(index);
2654 }
2655 }
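
/* Usage sketch (illustration only): building a PHY type pair that allows
 * only 10G and 25G link speeds before programming it into a hypothetical
 * ice_aqc_set_phy_cfg_data named "cfg" for ice_aq_set_phy_cfg(). The speed
 * bits are the ICE_AQ_LINK_SPEED_* values used throughout this file.
 *
 *	u64 phy_type_low = 0, phy_type_high = 0;
 *
 *	ice_update_phy_type(&phy_type_low, &phy_type_high,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 *	cfg.phy_type_low = cpu_to_le64(phy_type_low);
 *	cfg.phy_type_high = cpu_to_le64(phy_type_high);
 */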
2656
2657 /**
2658 * ice_aq_set_phy_cfg
2659 * @hw: pointer to the HW struct
2660 * @pi: port info structure of the interested logical port
2661 * @cfg: structure with PHY configuration data to be set
2662 * @cd: pointer to command details structure or NULL
2663 *
2664 * Set the various PHY configuration parameters supported on the Port.
2665 * One or more of the Set PHY config parameters may be ignored in an MFP
2666 * mode as the PF may not have the privilege to set some of the PHY Config
2667 * parameters. This status will be indicated by the command response (0x0601).
2668 */
2669 enum ice_status
2670 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
2671 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2672 {
2673 struct ice_aq_desc desc;
2674 enum ice_status status;
2675
2676 if (!cfg)
2677 return ICE_ERR_PARAM;
2678
2679 /* Ensure that only valid bits of cfg->caps can be turned on. */
2680 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2681 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2682 cfg->caps);
2683
2684 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2685 }
2686
2687 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2688 desc.params.set_phy.lport_num = pi->lport;
2689 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2690
2691 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
2692 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
2693 (unsigned long long)le64_to_cpu(cfg->phy_type_low));
2694 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
2695 (unsigned long long)le64_to_cpu(cfg->phy_type_high));
2696 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
2697 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
2698 cfg->low_power_ctrl_an);
2699 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
2700 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
2701 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
2702 cfg->link_fec_opt);
2703
2704 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2705 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
2706 status = 0;
2707
2708 if (!status)
2709 pi->phy.curr_user_phy_cfg = *cfg;
2710
2711 return status;
2712 }
2713
2714 /**
2715 * ice_update_link_info - update status of the HW network link
2716 * @pi: port info structure of the interested logical port
2717 */
2718 enum ice_status ice_update_link_info(struct ice_port_info *pi)
2719 {
2720 struct ice_link_status *li;
2721 enum ice_status status;
2722
2723 if (!pi)
2724 return ICE_ERR_PARAM;
2725
2726 li = &pi->phy.link_info;
2727
2728 status = ice_aq_get_link_info(pi, true, NULL, NULL);
2729 if (status)
2730 return status;
2731
2732 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2733 struct ice_aqc_get_phy_caps_data *pcaps;
2734 struct ice_hw *hw;
2735
2736 hw = pi->hw;
2737 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
2738 GFP_KERNEL);
2739 if (!pcaps)
2740 return ICE_ERR_NO_MEMORY;
2741
2742 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2743 pcaps, NULL);
2744
2745 devm_kfree(ice_hw_to_dev(hw), pcaps);
2746 }
2747
2748 return status;
2749 }
2750
2751 /**
2752 * ice_cache_phy_user_req
2753 * @pi: port information structure
2754 * @cache_data: PHY logging data
2755 * @cache_mode: PHY logging mode
2756 *
2757 * Log the user request on (FC, FEC, SPEED) for later use.
2758 */
2759 static void
2760 ice_cache_phy_user_req(struct ice_port_info *pi,
2761 struct ice_phy_cache_mode_data cache_data,
2762 enum ice_phy_cache_mode cache_mode)
2763 {
2764 if (!pi)
2765 return;
2766
2767 switch (cache_mode) {
2768 case ICE_FC_MODE:
2769 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
2770 break;
2771 case ICE_SPEED_MODE:
2772 pi->phy.curr_user_speed_req =
2773 cache_data.data.curr_user_speed_req;
2774 break;
2775 case ICE_FEC_MODE:
2776 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
2777 break;
2778 default:
2779 break;
2780 }
2781 }
2782
2783 /**
2784 * ice_caps_to_fc_mode
2785 * @caps: PHY capabilities
2786 *
2787 * Convert PHY FC capabilities to ice FC mode
2788 */
2789 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
2790 {
2791 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
2792 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2793 return ICE_FC_FULL;
2794
2795 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
2796 return ICE_FC_TX_PAUSE;
2797
2798 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2799 return ICE_FC_RX_PAUSE;
2800
2801 return ICE_FC_NONE;
2802 }
2803
2804 /**
2805 * ice_caps_to_fec_mode
2806 * @caps: PHY capabilities
2807 * @fec_options: Link FEC options
2808 *
2809 * Convert PHY FEC capabilities to ice FEC mode
2810 */
2811 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
2812 {
2813 if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
2814 return ICE_FEC_AUTO;
2815
2816 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2817 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2818 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
2819 ICE_AQC_PHY_FEC_25G_KR_REQ))
2820 return ICE_FEC_BASER;
2821
2822 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2823 ICE_AQC_PHY_FEC_25G_RS_544_REQ |
2824 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
2825 return ICE_FEC_RS;
2826
2827 return ICE_FEC_NONE;
2828 }
2829
2830 /**
2831 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
2832 * @pi: port information structure
2833 * @cfg: PHY configuration data to set FC mode
2834 * @req_mode: FC mode to configure
2835 */
2836 enum ice_status
2837 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2838 enum ice_fc_mode req_mode)
2839 {
2840 struct ice_phy_cache_mode_data cache_data;
2841 u8 pause_mask = 0x0;
2842
2843 if (!pi || !cfg)
2844 return ICE_ERR_BAD_PTR;
2845
2846 switch (req_mode) {
2847 case ICE_FC_FULL:
2848 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2849 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2850 break;
2851 case ICE_FC_RX_PAUSE:
2852 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2853 break;
2854 case ICE_FC_TX_PAUSE:
2855 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2856 break;
2857 default:
2858 break;
2859 }
2860
2861 /* clear the old pause settings */
2862 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
2863 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
2864
2865 /* set the new capabilities */
2866 cfg->caps |= pause_mask;
2867
2868 /* Cache user FC request */
2869 cache_data.data.curr_user_fc_req = req_mode;
2870 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
2871
2872 return 0;
2873 }
2874
2875 /**
2876 * ice_set_fc
2877 * @pi: port information structure
2878 * @aq_failures: pointer to status code, specific to ice_set_fc routine
2879 * @ena_auto_link_update: enable automatic link update
2880 *
2881 * Set the requested flow control mode.
2882 */
2883 enum ice_status
2884 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
2885 {
2886 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
2887 struct ice_aqc_get_phy_caps_data *pcaps;
2888 enum ice_status status;
2889 struct ice_hw *hw;
2890
2891 if (!pi || !aq_failures)
2892 return ICE_ERR_BAD_PTR;
2893
2894 *aq_failures = 0;
2895 hw = pi->hw;
2896
2897 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
2898 if (!pcaps)
2899 return ICE_ERR_NO_MEMORY;
2900
2901 /* Get the current PHY config */
2902 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
2903 pcaps, NULL);
2904 if (status) {
2905 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2906 goto out;
2907 }
2908
2909 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
2910
2911 /* Configure the set PHY data */
2912 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
2913 if (status)
2914 goto out;
2915
2916 /* If the capabilities have changed, then set the new config */
2917 if (cfg.caps != pcaps->caps) {
2918 int retry_count, retry_max = 10;
2919
2920 /* Auto restart link so settings take effect */
2921 if (ena_auto_link_update)
2922 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2923
2924 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
2925 if (status) {
2926 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
2927 goto out;
2928 }
2929
2930 /* Update the link info
2931 * It sometimes takes a really long time for link to
2932 * come back from the atomic reset. Thus, we wait a
2933 * little bit.
2934 */
2935 for (retry_count = 0; retry_count < retry_max; retry_count++) {
2936 status = ice_update_link_info(pi);
2937
2938 if (!status)
2939 break;
2940
2941 mdelay(100);
2942 }
2943
2944 if (status)
2945 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
2946 }
2947
2948 out:
2949 devm_kfree(ice_hw_to_dev(hw), pcaps);
2950 return status;
2951 }
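
/* Usage sketch (illustration only): ice_set_fc() reads the requested flow
 * control mode from pi->fc.req_mode rather than taking it as a parameter, so
 * a hypothetical caller enabling full pause with automatic link restart
 * would look roughly like this:
 *
 *	enum ice_status status;
 *	u8 aq_failures = 0;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_failures, true);
 *	if (status)
 *		// aq_failures reports which step failed:
 *		// ICE_SET_FC_AQ_FAIL_GET, _SET or _UPDATE
 *		handle_failure(aq_failures);
 */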
2952
2953 /**
2954 * ice_phy_caps_equals_cfg
2955 * @phy_caps: PHY capabilities
2956 * @phy_cfg: PHY configuration
2957 *
2958  * Helper function to determine whether the PHY capabilities match the PHY
2959  * configuration.
2960 */
2961 bool
2962 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
2963 struct ice_aqc_set_phy_cfg_data *phy_cfg)
2964 {
2965 u8 caps_mask, cfg_mask;
2966
2967 if (!phy_caps || !phy_cfg)
2968 return false;
2969
2970 /* These bits are not common between capabilities and configuration.
2971 * Do not use them to determine equality.
2972 */
2973 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
2974 ICE_AQC_GET_PHY_EN_MOD_QUAL);
2975 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2976
2977 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
2978 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
2979 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
2980 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
2981 phy_caps->eee_cap != phy_cfg->eee_cap ||
2982 phy_caps->eeer_value != phy_cfg->eeer_value ||
2983 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
2984 return false;
2985
2986 return true;
2987 }
2988
2989 /**
2990 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
2991 * @pi: port information structure
2992  * @caps: PHY ability structure to copy data from
2993 * @cfg: PHY configuration structure to copy data to
2994 *
2995 * Helper function to copy AQC PHY get ability data to PHY set configuration
2996 * data structure
2997 */
2998 void
2999 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3000 struct ice_aqc_get_phy_caps_data *caps,
3001 struct ice_aqc_set_phy_cfg_data *cfg)
3002 {
3003 if (!pi || !caps || !cfg)
3004 return;
3005
3006 memset(cfg, 0, sizeof(*cfg));
3007 cfg->phy_type_low = caps->phy_type_low;
3008 cfg->phy_type_high = caps->phy_type_high;
3009 cfg->caps = caps->caps;
3010 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3011 cfg->eee_cap = caps->eee_cap;
3012 cfg->eeer_value = caps->eeer_value;
3013 cfg->link_fec_opt = caps->link_fec_options;
3014 cfg->module_compliance_enforcement =
3015 caps->module_compliance_enforcement;
3016 }
3017
3018 /**
3019 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
3020 * @pi: port information structure
3021 * @cfg: PHY configuration data to set FEC mode
3022 * @fec: FEC mode to configure
3023 */
3024 enum ice_status
3025 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3026 enum ice_fec_mode fec)
3027 {
3028 struct ice_aqc_get_phy_caps_data *pcaps;
3029 enum ice_status status;
3030 struct ice_hw *hw;
3031
3032 if (!pi || !cfg)
3033 return ICE_ERR_BAD_PTR;
3034
3035 hw = pi->hw;
3036
3037 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
3038 if (!pcaps)
3039 return ICE_ERR_NO_MEMORY;
3040
3041 status = ice_aq_get_phy_caps(pi, false,
3042 (ice_fw_supports_report_dflt_cfg(hw) ?
3043 ICE_AQC_REPORT_DFLT_CFG :
3044 ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
3045 if (status)
3046 goto out;
3047
3048 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
3049 cfg->link_fec_opt = pcaps->link_fec_options;
3050
3051 switch (fec) {
3052 case ICE_FEC_BASER:
3053 /* Clear RS bits, and AND BASE-R ability
3054 * bits and OR request bits.
3055 */
3056 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3057 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3058 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3059 ICE_AQC_PHY_FEC_25G_KR_REQ;
3060 break;
3061 case ICE_FEC_RS:
3062 /* Clear BASE-R bits, and AND RS ability
3063 * bits and OR request bits.
3064 */
3065 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3066 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3067 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3068 break;
3069 case ICE_FEC_NONE:
3070 /* Clear all FEC option bits. */
3071 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3072 break;
3073 case ICE_FEC_AUTO:
3074 /* AND auto FEC bit, and all caps bits. */
3075 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3076 cfg->link_fec_opt |= pcaps->link_fec_options;
3077 break;
3078 default:
3079 status = ICE_ERR_PARAM;
3080 break;
3081 }
3082
3083 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
3084 !ice_fw_supports_report_dflt_cfg(hw)) {
3085 struct ice_link_default_override_tlv tlv;
3086
3087 if (ice_get_link_default_override(&tlv, pi))
3088 goto out;
3089
3090 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3091 (tlv.options & ICE_LINK_OVERRIDE_EN))
3092 cfg->link_fec_opt = tlv.fec_options;
3093 }
3094
3095 out:
3096 kfree(pcaps);
3097
3098 return status;
3099 }
3100
3101 /**
3102 * ice_get_link_status - get status of the HW network link
3103 * @pi: port information structure
3104 * @link_up: pointer to bool (true/false = linkup/linkdown)
3105 *
3106  * Variable link_up is true if the link is up, false if the link is down.
3107  * The variable link_up is invalid if the return status is non-zero. As a
3108  * result of this call, link status reporting becomes enabled.
3109 */
3110 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3111 {
3112 struct ice_phy_info *phy_info;
3113 enum ice_status status = 0;
3114
3115 if (!pi || !link_up)
3116 return ICE_ERR_PARAM;
3117
3118 phy_info = &pi->phy;
3119
3120 if (phy_info->get_link_info) {
3121 status = ice_update_link_info(pi);
3122
3123 if (status)
3124 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
3125 status);
3126 }
3127
3128 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3129
3130 return status;
3131 }
3132
3133 /**
3134 * ice_aq_set_link_restart_an
3135 * @pi: pointer to the port information structure
3136 * @ena_link: if true: enable link, if false: disable link
3137 * @cd: pointer to command details structure or NULL
3138 *
3139 * Sets up the link and restarts the Auto-Negotiation over the link.
3140 */
3141 enum ice_status
3142 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3143 struct ice_sq_cd *cd)
3144 {
3145 struct ice_aqc_restart_an *cmd;
3146 struct ice_aq_desc desc;
3147
3148 cmd = &desc.params.restart_an;
3149
3150 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3151
3152 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3153 cmd->lport_num = pi->lport;
3154 if (ena_link)
3155 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3156 else
3157 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3158
3159 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3160 }
3161
3162 /**
3163 * ice_aq_set_event_mask
3164 * @hw: pointer to the HW struct
3165 * @port_num: port number of the physical function
3166 * @mask: event mask to be set
3167 * @cd: pointer to command details structure or NULL
3168 *
3169 * Set event mask (0x0613)
3170 */
3171 enum ice_status
3172 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3173 struct ice_sq_cd *cd)
3174 {
3175 struct ice_aqc_set_event_mask *cmd;
3176 struct ice_aq_desc desc;
3177
3178 cmd = &desc.params.set_event_mask;
3179
3180 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3181
3182 cmd->lport_num = port_num;
3183
3184 cmd->event_mask = cpu_to_le16(mask);
3185 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3186 }
3187
3188 /**
3189 * ice_aq_set_mac_loopback
3190 * @hw: pointer to the HW struct
3191 * @ena_lpbk: Enable or Disable loopback
3192 * @cd: pointer to command details structure or NULL
3193 *
3194 * Enable/disable loopback on a given port
3195 */
3196 enum ice_status
3197 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3198 {
3199 struct ice_aqc_set_mac_lb *cmd;
3200 struct ice_aq_desc desc;
3201
3202 cmd = &desc.params.set_mac_lb;
3203
3204 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3205 if (ena_lpbk)
3206 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3207
3208 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3209 }
3210
3211 /**
3212 * ice_aq_set_port_id_led
3213 * @pi: pointer to the port information
3214 * @is_orig_mode: is this LED set to original mode (by the net-list)
3215 * @cd: pointer to command details structure or NULL
3216 *
3217 * Set LED value for the given port (0x06e9)
3218 */
3219 enum ice_status
3220 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3221 struct ice_sq_cd *cd)
3222 {
3223 struct ice_aqc_set_port_id_led *cmd;
3224 struct ice_hw *hw = pi->hw;
3225 struct ice_aq_desc desc;
3226
3227 cmd = &desc.params.set_port_id_led;
3228
3229 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3230
3231 if (is_orig_mode)
3232 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3233 else
3234 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3235
3236 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3237 }
3238
3239 /**
3240 * ice_aq_sff_eeprom
3241 * @hw: pointer to the HW struct
3242 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3243 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
3244 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
3245 * @page: QSFP page
3246 * @set_page: set or ignore the page
3247 * @data: pointer to data buffer to be read/written to the I2C device.
3248 * @length: 1-16 for read, 1 for write.
3249  * @write: 0 for read, 1 for write.
3250 * @cd: pointer to command details structure or NULL
3251 *
3252 * Read/Write SFF EEPROM (0x06EE)
3253 */
3254 enum ice_status
3255 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3256 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3257 bool write, struct ice_sq_cd *cd)
3258 {
3259 struct ice_aqc_sff_eeprom *cmd;
3260 struct ice_aq_desc desc;
3261 enum ice_status status;
3262
3263 if (!data || (mem_addr & 0xff00))
3264 return ICE_ERR_PARAM;
3265
3266 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3267 cmd = &desc.params.read_write_sff_param;
3268 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
3269 cmd->lport_num = (u8)(lport & 0xff);
3270 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3271 cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
3272 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3273 ((set_page <<
3274 ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3275 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3276 cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
3277 cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3278 if (write)
3279 cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);
3280
3281 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
3282 return status;
3283 }
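
/* Usage sketch (illustration only): reading the first byte of a module's
 * EEPROM on the default 0xA0 I2C address mentioned in the parameter
 * documentation above, page 0, without changing the page. The lport value,
 * offset and single-byte buffer are assumptions made for the example.
 *
 *	enum ice_status status;
 *	u8 byte;
 *
 *	status = ice_aq_sff_eeprom(hw, 0, 0xA0, 0x00, 0, 0, &byte, 1,
 *				   false, NULL);
 */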
3284
3285 /**
3286 * __ice_aq_get_set_rss_lut
3287 * @hw: pointer to the hardware structure
3288 * @params: RSS LUT parameters
3289 * @set: set true to set the table, false to get the table
3290 *
3291 * Internal function to get (0x0B05) or set (0x0B03) the RSS lookup table
3292 */
3293 static enum ice_status
3294 __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
3295 {
3296 u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
3297 struct ice_aqc_get_set_rss_lut *cmd_resp;
3298 struct ice_aq_desc desc;
3299 enum ice_status status;
3300 u8 *lut;
3301
3302 if (!params)
3303 return ICE_ERR_PARAM;
3304
3305 vsi_handle = params->vsi_handle;
3306 lut = params->lut;
3307
3308 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3309 return ICE_ERR_PARAM;
3310
3311 lut_size = params->lut_size;
3312 lut_type = params->lut_type;
3313 glob_lut_idx = params->global_lut_id;
3314 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3315
3316 cmd_resp = &desc.params.get_set_rss_lut;
3317
3318 if (set) {
3319 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
3320 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3321 } else {
3322 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
3323 }
3324
3325 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
3326 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
3327 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
3328 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
3329
3330 switch (lut_type) {
3331 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
3332 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
3333 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
3334 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
3335 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
3336 break;
3337 default:
3338 status = ICE_ERR_PARAM;
3339 goto ice_aq_get_set_rss_lut_exit;
3340 }
3341
3342 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
3343 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
3344 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
3345
3346 if (!set)
3347 goto ice_aq_get_set_rss_lut_send;
3348 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3349 if (!set)
3350 goto ice_aq_get_set_rss_lut_send;
3351 } else {
3352 goto ice_aq_get_set_rss_lut_send;
3353 }
3354
3355 /* LUT size is only valid for Global and PF table types */
3356 switch (lut_size) {
3357 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
3358 break;
3359 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
3360 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
3361 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3362 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3363 break;
3364 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
3365 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3366 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
3367 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3368 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3369 break;
3370 }
3371 fallthrough;
3372 default:
3373 status = ICE_ERR_PARAM;
3374 goto ice_aq_get_set_rss_lut_exit;
3375 }
3376
3377 ice_aq_get_set_rss_lut_send:
3378 cmd_resp->flags = cpu_to_le16(flags);
3379 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
3380
3381 ice_aq_get_set_rss_lut_exit:
3382 return status;
3383 }
3384
3385 /**
3386 * ice_aq_get_rss_lut
3387 * @hw: pointer to the hardware structure
3388 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
3389 *
3390 * get the RSS lookup table, PF or VSI type
3391 */
3392 enum ice_status
3393 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
3394 {
3395 return __ice_aq_get_set_rss_lut(hw, get_params, false);
3396 }
3397
3398 /**
3399 * ice_aq_set_rss_lut
3400 * @hw: pointer to the hardware structure
3401 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
3402 *
3403 * set the RSS lookup table, PF or VSI type
3404 */
3405 enum ice_status
3406 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
3407 {
3408 return __ice_aq_get_set_rss_lut(hw, set_params, true);
3409 }
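
/* Usage sketch (illustrative): program a 512-entry PF lookup table. The
 * caller owns the "lut" byte array (one queue index per entry) and
 * "vsi_handle" must be a valid software VSI handle.
 *
 *	struct ice_aq_get_set_rss_lut_params params = {
 *		.vsi_handle = vsi_handle,
 *		.lut = lut,
 *		.lut_size = ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512,
 *		.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
 *		.global_lut_id = 0,
 *	};
 *	enum ice_status status;
 *
 *	status = ice_aq_set_rss_lut(hw, &params);
 */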
3410
3411 /**
3412 * __ice_aq_get_set_rss_key
3413 * @hw: pointer to the HW struct
3414 * @vsi_id: VSI FW index
3415 * @key: pointer to key info struct
3416 * @set: set true to set the key, false to get the key
3417 *
3418 * get (0x0B04) or set (0x0B02) the RSS key per VSI
3419 */
3420 static enum ice_status
3421 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3422 struct ice_aqc_get_set_rss_keys *key,
3423 bool set)
3424 {
3425 struct ice_aqc_get_set_rss_key *cmd_resp;
3426 u16 key_size = sizeof(*key);
3427 struct ice_aq_desc desc;
3428
3429 cmd_resp = &desc.params.get_set_rss_key;
3430
3431 if (set) {
3432 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3433 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3434 } else {
3435 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3436 }
3437
3438 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
3439 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3440 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3441 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3442
3443 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3444 }
3445
3446 /**
3447 * ice_aq_get_rss_key
3448 * @hw: pointer to the HW struct
3449 * @vsi_handle: software VSI handle
3450 * @key: pointer to key info struct
3451 *
3452 * get the RSS key per VSI
3453 */
3454 enum ice_status
3455 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
3456 struct ice_aqc_get_set_rss_keys *key)
3457 {
3458 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3459 return ICE_ERR_PARAM;
3460
3461 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3462 key, false);
3463 }
3464
3465 /**
3466 * ice_aq_set_rss_key
3467 * @hw: pointer to the HW struct
3468 * @vsi_handle: software VSI handle
3469 * @keys: pointer to key info struct
3470 *
3471 * set the RSS key per VSI
3472 */
3473 enum ice_status
3474 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
3475 struct ice_aqc_get_set_rss_keys *keys)
3476 {
3477 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3478 return ICE_ERR_PARAM;
3479
3480 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3481 keys, true);
3482 }
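
/* Usage sketch (illustrative): program a VSI's RSS hash key. The
 * standard_rss_key field name is an assumption about the layout of
 * struct ice_aqc_get_set_rss_keys in ice_adminq_cmd.h.
 *
 *	struct ice_aqc_get_set_rss_keys keys = { 0 };
 *	enum ice_status status;
 *
 *	memcpy(keys.standard_rss_key, seed, sizeof(keys.standard_rss_key));
 *	status = ice_aq_set_rss_key(hw, vsi_handle, &keys);
 */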
3483
3484 /**
3485 * ice_aq_add_lan_txq
3486 * @hw: pointer to the hardware structure
3487 * @num_qgrps: Number of added queue groups
3488 * @qg_list: list of queue groups to be added
3489 * @buf_size: size of buffer for indirect command
3490 * @cd: pointer to command details structure or NULL
3491 *
3492 * Add Tx LAN queue (0x0C30)
3493 *
3494 * NOTE:
3495 * Prior to calling add Tx LAN queue:
3496 * Initialize the following as part of the Tx queue context:
3497 * Completion queue ID if the queue uses Completion queue, Quanta profile,
3498 * Cache profile and Packet shaper profile.
3499 *
3500 * After add Tx LAN queue AQ command is completed:
3501 * Interrupts should be associated with specific queues,
3502 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
3503 * flow.
3504 */
3505 static enum ice_status
3506 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3507 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
3508 struct ice_sq_cd *cd)
3509 {
3510 struct ice_aqc_add_tx_qgrp *list;
3511 struct ice_aqc_add_txqs *cmd;
3512 struct ice_aq_desc desc;
3513 u16 i, sum_size = 0;
3514
3515 cmd = &desc.params.add_txqs;
3516
3517 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
3518
3519 if (!qg_list)
3520 return ICE_ERR_PARAM;
3521
3522 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3523 return ICE_ERR_PARAM;
3524
3525 for (i = 0, list = qg_list; i < num_qgrps; i++) {
3526 sum_size += struct_size(list, txqs, list->num_txqs);
3527 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
3528 list->num_txqs);
3529 }
3530
3531 if (buf_size != sum_size)
3532 return ICE_ERR_PARAM;
3533
3534 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3535
3536 cmd->num_qgrps = num_qgrps;
3537
3538 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3539 }
3540
3541 /**
3542 * ice_aq_dis_lan_txq
3543 * @hw: pointer to the hardware structure
3544 * @num_qgrps: number of groups in the list
3545 * @qg_list: the list of groups to disable
3546 * @buf_size: the total size of the qg_list buffer in bytes
3547 * @rst_src: if called due to reset, specifies the reset source
3548 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3549 * @cd: pointer to command details structure or NULL
3550 *
3551 * Disable LAN Tx queue (0x0C31)
3552 */
3553 static enum ice_status
3554 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3555 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
3556 enum ice_disq_rst_src rst_src, u16 vmvf_num,
3557 struct ice_sq_cd *cd)
3558 {
3559 struct ice_aqc_dis_txq_item *item;
3560 struct ice_aqc_dis_txqs *cmd;
3561 struct ice_aq_desc desc;
3562 enum ice_status status;
3563 u16 i, sz = 0;
3564
3565 cmd = &desc.params.dis_txqs;
3566 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
3567
3568 /* qg_list can be NULL only in VM/VF reset flow */
3569 if (!qg_list && !rst_src)
3570 return ICE_ERR_PARAM;
3571
3572 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3573 return ICE_ERR_PARAM;
3574
3575 cmd->num_entries = num_qgrps;
3576
3577 cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
3578 ICE_AQC_Q_DIS_TIMEOUT_M);
3579
3580 switch (rst_src) {
3581 case ICE_VM_RESET:
3582 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
3583 cmd->vmvf_and_timeout |=
3584 cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
3585 break;
3586 case ICE_VF_RESET:
3587 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
3588 /* In this case, FW expects vmvf_num to be absolute VF ID */
3589 cmd->vmvf_and_timeout |=
3590 cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
3591 ICE_AQC_Q_DIS_VMVF_NUM_M);
3592 break;
3593 case ICE_NO_RESET:
3594 default:
3595 break;
3596 }
3597
3598 /* flush pipe on time out */
3599 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
3600 /* If no queue group info, we are in a reset flow. Issue the AQ */
3601 if (!qg_list)
3602 goto do_aq;
3603
3604 /* set RD bit to indicate that command buffer is provided by the driver
3605 * and it needs to be read by the firmware
3606 */
3607 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3608
3609 for (i = 0, item = qg_list; i < num_qgrps; i++) {
3610 u16 item_size = struct_size(item, q_id, item->num_qs);
3611
3612 /* If the num of queues is even, add 2 bytes of padding */
3613 if ((item->num_qs % 2) == 0)
3614 item_size += 2;
3615
3616 sz += item_size;
3617
3618 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
3619 }
3620
3621 if (buf_size != sz)
3622 return ICE_ERR_PARAM;
3623
3624 do_aq:
3625 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3626 if (status) {
3627 if (!qg_list)
3628 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
3629 vmvf_num, hw->adminq.sq_last_status);
3630 else
3631 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
3632 le16_to_cpu(qg_list[0].q_id[0]),
3633 hw->adminq.sq_last_status);
3634 }
3635 return status;
3636 }
3637
3638 /* End of FW Admin Queue command wrappers */
3639
3640 /**
3641 * ice_write_byte - write a byte to a packed context structure
3642 * @src_ctx: the context structure to read from
3643 * @dest_ctx: the context to be written to
3644 * @ce_info: a description of the struct to be filled
3645 */
3646 static void
3647 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3648 {
3649 u8 src_byte, dest_byte, mask;
3650 u8 *from, *dest;
3651 u16 shift_width;
3652
3653 /* copy from the next struct field */
3654 from = src_ctx + ce_info->offset;
3655
3656 /* prepare the bits and mask */
3657 shift_width = ce_info->lsb % 8;
3658 mask = (u8)(BIT(ce_info->width) - 1);
3659
3660 src_byte = *from;
3661 src_byte &= mask;
3662
3663 /* shift to correct alignment */
3664 mask <<= shift_width;
3665 src_byte <<= shift_width;
3666
3667 /* get the current bits from the target bit string */
3668 dest = dest_ctx + (ce_info->lsb / 8);
3669
3670 memcpy(&dest_byte, dest, sizeof(dest_byte));
3671
3672 dest_byte &= ~mask; /* get the bits not changing */
3673 dest_byte |= src_byte; /* add in the new bits */
3674
3675 /* put it all back */
3676 memcpy(dest, &dest_byte, sizeof(dest_byte));
3677 }
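
/* Worked example for ice_write_byte(): a field with width = 3 and lsb = 5
 * gives shift_width = 5 and an initial mask of 0x07. After shifting, the
 * mask is 0xE0, so only bits 7:5 of the destination byte are replaced; a
 * source value of 0x5 is written as 0xA0 (0x5 << 5) while bits 4:0 of the
 * destination are preserved.
 */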
3678
3679 /**
3680 * ice_write_word - write a word to a packed context structure
3681 * @src_ctx: the context structure to read from
3682 * @dest_ctx: the context to be written to
3683 * @ce_info: a description of the struct to be filled
3684 */
3685 static void
3686 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3687 {
3688 u16 src_word, mask;
3689 __le16 dest_word;
3690 u8 *from, *dest;
3691 u16 shift_width;
3692
3693 /* copy from the next struct field */
3694 from = src_ctx + ce_info->offset;
3695
3696 /* prepare the bits and mask */
3697 shift_width = ce_info->lsb % 8;
3698 mask = BIT(ce_info->width) - 1;
3699
3700 /* don't swizzle the bits until after the mask because the mask bits
3701 * will be in a different bit position on big endian machines
3702 */
3703 src_word = *(u16 *)from;
3704 src_word &= mask;
3705
3706 /* shift to correct alignment */
3707 mask <<= shift_width;
3708 src_word <<= shift_width;
3709
3710 /* get the current bits from the target bit string */
3711 dest = dest_ctx + (ce_info->lsb / 8);
3712
3713 memcpy(&dest_word, dest, sizeof(dest_word));
3714
3715 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */
3716 dest_word |= cpu_to_le16(src_word); /* add in the new bits */
3717
3718 /* put it all back */
3719 memcpy(dest, &dest_word, sizeof(dest_word));
3720 }
3721
3722 /**
3723 * ice_write_dword - write a dword to a packed context structure
3724 * @src_ctx: the context structure to read from
3725 * @dest_ctx: the context to be written to
3726 * @ce_info: a description of the struct to be filled
3727 */
3728 static void
3729 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3730 {
3731 u32 src_dword, mask;
3732 __le32 dest_dword;
3733 u8 *from, *dest;
3734 u16 shift_width;
3735
3736 /* copy from the next struct field */
3737 from = src_ctx + ce_info->offset;
3738
3739 /* prepare the bits and mask */
3740 shift_width = ce_info->lsb % 8;
3741
3742 /* if the field width is exactly 32 on an x86 machine, then the shift
3743 * operation will not work because the SHL instruction's count is masked
3744 * to 5 bits, so the shift will do nothing
3745 */
3746 if (ce_info->width < 32)
3747 mask = BIT(ce_info->width) - 1;
3748 else
3749 mask = (u32)~0;
3750
3751 /* don't swizzle the bits until after the mask because the mask bits
3752 * will be in a different bit position on big endian machines
3753 */
3754 src_dword = *(u32 *)from;
3755 src_dword &= mask;
3756
3757 /* shift to correct alignment */
3758 mask <<= shift_width;
3759 src_dword <<= shift_width;
3760
3761 /* get the current bits from the target bit string */
3762 dest = dest_ctx + (ce_info->lsb / 8);
3763
3764 memcpy(&dest_dword, dest, sizeof(dest_dword));
3765
3766 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */
3767 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */
3768
3769 /* put it all back */
3770 memcpy(dest, &dest_dword, sizeof(dest_dword));
3771 }
3772
3773 /**
3774 * ice_write_qword - write a qword to a packed context structure
3775 * @src_ctx: the context structure to read from
3776 * @dest_ctx: the context to be written to
3777 * @ce_info: a description of the struct to be filled
3778 */
3779 static void
3780 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3781 {
3782 u64 src_qword, mask;
3783 __le64 dest_qword;
3784 u8 *from, *dest;
3785 u16 shift_width;
3786
3787 /* copy from the next struct field */
3788 from = src_ctx + ce_info->offset;
3789
3790 /* prepare the bits and mask */
3791 shift_width = ce_info->lsb % 8;
3792
3793 /* if the field width is exactly 64 on an x86 machine, then the shift
3794 * operation will not work because the SHL instruction's count is masked
3795 * to 6 bits, so the shift will do nothing
3796 */
3797 if (ce_info->width < 64)
3798 mask = BIT_ULL(ce_info->width) - 1;
3799 else
3800 mask = (u64)~0;
3801
3802 /* don't swizzle the bits until after the mask because the mask bits
3803 * will be in a different bit position on big endian machines
3804 */
3805 src_qword = *(u64 *)from;
3806 src_qword &= mask;
3807
3808 /* shift to correct alignment */
3809 mask <<= shift_width;
3810 src_qword <<= shift_width;
3811
3812 /* get the current bits from the target bit string */
3813 dest = dest_ctx + (ce_info->lsb / 8);
3814
3815 memcpy(&dest_qword, dest, sizeof(dest_qword));
3816
3817 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
3818 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
3819
3820 /* put it all back */
3821 memcpy(dest, &dest_qword, sizeof(dest_qword));
3822 }
3823
3824 /**
3825 * ice_set_ctx - set context bits in packed structure
3826 * @hw: pointer to the hardware structure
3827 * @src_ctx: pointer to a generic non-packed context structure
3828 * @dest_ctx: pointer to memory for the packed structure
3829 * @ce_info: a description of the structure to be transformed
3830 */
3831 enum ice_status
3832 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
3833 const struct ice_ctx_ele *ce_info)
3834 {
3835 int f;
3836
3837 for (f = 0; ce_info[f].width; f++) {
3838 /* We have to deal with each element of the FW response
3839 * using the correct size so that we are correct regardless
3840 * of the endianness of the machine.
3841 */
3842 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
3843 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
3844 f, ce_info[f].width, ce_info[f].size_of);
3845 continue;
3846 }
3847 switch (ce_info[f].size_of) {
3848 case sizeof(u8):
3849 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
3850 break;
3851 case sizeof(u16):
3852 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
3853 break;
3854 case sizeof(u32):
3855 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
3856 break;
3857 case sizeof(u64):
3858 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
3859 break;
3860 default:
3861 return ICE_ERR_INVAL_SIZE;
3862 }
3863 }
3864
3865 return 0;
3866 }
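
/* Usage sketch (illustrative): a ce_info table describes how each field of
 * a CPU-friendly context struct is packed into the byte stream expected by
 * firmware. A hypothetical two-field context and its table:
 *
 *	struct my_ctx { u16 base; u8 ena; };	// hypothetical struct
 *
 *	static const struct ice_ctx_ele my_ctx_info[] = {
 *		{ .offset = offsetof(struct my_ctx, base),
 *		  .size_of = sizeof(u16), .width = 13, .lsb = 0 },
 *		{ .offset = offsetof(struct my_ctx, ena),
 *		  .size_of = sizeof(u8), .width = 1, .lsb = 13 },
 *		{ 0 },	// a width of 0 terminates the table
 *	};
 *
 *	status = ice_set_ctx(hw, (u8 *)&my_ctx, packed_buf, my_ctx_info);
 */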
3867
3868 /**
3869 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
3870 * @hw: pointer to the HW struct
3871 * @vsi_handle: software VSI handle
3872 * @tc: TC number
3873 * @q_handle: software queue handle
3874 */
3875 struct ice_q_ctx *
3876 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
3877 {
3878 struct ice_vsi_ctx *vsi;
3879 struct ice_q_ctx *q_ctx;
3880
3881 vsi = ice_get_vsi_ctx(hw, vsi_handle);
3882 if (!vsi)
3883 return NULL;
3884 if (q_handle >= vsi->num_lan_q_entries[tc])
3885 return NULL;
3886 if (!vsi->lan_q_ctx[tc])
3887 return NULL;
3888 q_ctx = vsi->lan_q_ctx[tc];
3889 return &q_ctx[q_handle];
3890 }
3891
3892 /**
3893 * ice_ena_vsi_txq
3894 * @pi: port information structure
3895 * @vsi_handle: software VSI handle
3896 * @tc: TC number
3897 * @q_handle: software queue handle
3898 * @num_qgrps: Number of added queue groups
3899 * @buf: list of queue groups to be added
3900 * @buf_size: size of buffer for indirect command
3901 * @cd: pointer to command details structure or NULL
3902 *
3903 * This function adds one LAN queue
3904 */
3905 enum ice_status
3906 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
3907 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
3908 struct ice_sq_cd *cd)
3909 {
3910 struct ice_aqc_txsched_elem_data node = { 0 };
3911 struct ice_sched_node *parent;
3912 struct ice_q_ctx *q_ctx;
3913 enum ice_status status;
3914 struct ice_hw *hw;
3915
3916 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3917 return ICE_ERR_CFG;
3918
3919 if (num_qgrps > 1 || buf->num_txqs > 1)
3920 return ICE_ERR_MAX_LIMIT;
3921
3922 hw = pi->hw;
3923
3924 if (!ice_is_vsi_valid(hw, vsi_handle))
3925 return ICE_ERR_PARAM;
3926
3927 mutex_lock(&pi->sched_lock);
3928
3929 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
3930 if (!q_ctx) {
3931 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
3932 q_handle);
3933 status = ICE_ERR_PARAM;
3934 goto ena_txq_exit;
3935 }
3936
3937 /* find a parent node */
3938 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
3939 ICE_SCHED_NODE_OWNER_LAN);
3940 if (!parent) {
3941 status = ICE_ERR_PARAM;
3942 goto ena_txq_exit;
3943 }
3944
3945 buf->parent_teid = parent->info.node_teid;
3946 node.parent_teid = parent->info.node_teid;
3947 /* Mark the values in the "generic" section as valid. The default
3948 * value in the "generic" section is zero. This means that:
3949 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
3950 * - 0 priority among siblings, indicated by Bit 1-3.
3951 * - WFQ, indicated by Bit 4.
3952 * - 0 Adjustment value is used in PSM credit update flow, indicated by
3953 * Bit 5-6.
3954 * - Bit 7 is reserved.
3955 * Without setting the generic section as valid in valid_sections, the
3956 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
3957 */
3958 buf->txqs[0].info.valid_sections =
3959 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
3960 ICE_AQC_ELEM_VALID_EIR;
3961 buf->txqs[0].info.generic = 0;
3962 buf->txqs[0].info.cir_bw.bw_profile_idx =
3963 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
3964 buf->txqs[0].info.cir_bw.bw_alloc =
3965 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
3966 buf->txqs[0].info.eir_bw.bw_profile_idx =
3967 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
3968 buf->txqs[0].info.eir_bw.bw_alloc =
3969 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
3970
3971 /* add the LAN queue */
3972 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
3973 if (status) {
3974 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
3975 le16_to_cpu(buf->txqs[0].txq_id),
3976 hw->adminq.sq_last_status);
3977 goto ena_txq_exit;
3978 }
3979
3980 node.node_teid = buf->txqs[0].q_teid;
3981 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
3982 q_ctx->q_handle = q_handle;
3983 q_ctx->q_teid = le32_to_cpu(node.node_teid);
3984
3985 /* add a leaf node into scheduler tree queue layer */
3986 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
3987 if (!status)
3988 status = ice_sched_replay_q_bw(pi, q_ctx);
3989
3990 ena_txq_exit:
3991 mutex_unlock(&pi->sched_lock);
3992 return status;
3993 }
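
/* Usage sketch (illustrative): enable one Tx queue. The caller allocates a
 * one-entry queue group, fills in the queue ID and the packed Tx queue
 * context (the txq_ctx field name is assumed from
 * struct ice_aqc_add_tx_qgrp), then hands the group to this function.
 *
 *	buf_size = struct_size(qg_buf, txqs, 1);
 *	qg_buf->num_txqs = 1;
 *	qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
 *	// ... pack the Tx queue context into qg_buf->txqs[0].txq_ctx ...
 *	status = ice_ena_vsi_txq(pi, vsi_handle, tc, q_handle, 1, qg_buf,
 *				 buf_size, NULL);
 */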
3994
3995 /**
3996 * ice_dis_vsi_txq
3997 * @pi: port information structure
3998 * @vsi_handle: software VSI handle
3999 * @tc: TC number
4000 * @num_queues: number of queues
4001 * @q_handles: pointer to software queue handle array
4002 * @q_ids: pointer to the q_id array
4003 * @q_teids: pointer to queue node teids
4004 * @rst_src: if called due to reset, specifies the reset source
4005 * @vmvf_num: the relative VM or VF number that is undergoing the reset
4006 * @cd: pointer to command details structure or NULL
4007 *
4008 * This function removes queues and their corresponding nodes in SW DB
4009 */
4010 enum ice_status
4011 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
4012 u16 *q_handles, u16 *q_ids, u32 *q_teids,
4013 enum ice_disq_rst_src rst_src, u16 vmvf_num,
4014 struct ice_sq_cd *cd)
4015 {
4016 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
4017 struct ice_aqc_dis_txq_item *qg_list;
4018 struct ice_q_ctx *q_ctx;
4019 struct ice_hw *hw;
4020 u16 i, buf_size;
4021
4022 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4023 return ICE_ERR_CFG;
4024
4025 hw = pi->hw;
4026
4027 if (!num_queues) {
4028 /* if the queue is already disabled but the disable queue command
4029 * still has to be sent to complete the VF reset, call
4030 * ice_aq_dis_lan_txq without any queue information
4031 */
4032 if (rst_src)
4033 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
4034 vmvf_num, NULL);
4035 return ICE_ERR_CFG;
4036 }
4037
4038 buf_size = struct_size(qg_list, q_id, 1);
4039 qg_list = kzalloc(buf_size, GFP_KERNEL);
4040 if (!qg_list)
4041 return ICE_ERR_NO_MEMORY;
4042
4043 mutex_lock(&pi->sched_lock);
4044
4045 for (i = 0; i < num_queues; i++) {
4046 struct ice_sched_node *node;
4047
4048 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
4049 if (!node)
4050 continue;
4051 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
4052 if (!q_ctx) {
4053 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
4054 q_handles[i]);
4055 continue;
4056 }
4057 if (q_ctx->q_handle != q_handles[i]) {
4058 ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
4059 q_ctx->q_handle, q_handles[i]);
4060 continue;
4061 }
4062 qg_list->parent_teid = node->info.parent_teid;
4063 qg_list->num_qs = 1;
4064 qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
4065 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
4066 vmvf_num, cd);
4067
4068 if (status)
4069 break;
4070 ice_free_sched_node(pi, node);
4071 q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
4072 }
4073 mutex_unlock(&pi->sched_lock);
4074 kfree(qg_list);
4075 return status;
4076 }
4077
4078 /**
4079 * ice_cfg_vsi_qs - configure the new/existing VSI queues
4080 * @pi: port information structure
4081 * @vsi_handle: software VSI handle
4082 * @tc_bitmap: TC bitmap
4083 * @maxqs: max queues array per TC
4084 * @owner: LAN or RDMA
4085 *
4086 * This function adds/updates the VSI queues per TC.
4087 */
4088 static enum ice_status
4089 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
4090 u16 *maxqs, u8 owner)
4091 {
4092 enum ice_status status = 0;
4093 u8 i;
4094
4095 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4096 return ICE_ERR_CFG;
4097
4098 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4099 return ICE_ERR_PARAM;
4100
4101 mutex_lock(&pi->sched_lock);
4102
4103 ice_for_each_traffic_class(i) {
4104 /* configuration is possible only if TC node is present */
4105 if (!ice_sched_get_tc_node(pi, i))
4106 continue;
4107
4108 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
4109 ice_is_tc_ena(tc_bitmap, i));
4110 if (status)
4111 break;
4112 }
4113
4114 mutex_unlock(&pi->sched_lock);
4115 return status;
4116 }
4117
4118 /**
4119 * ice_cfg_vsi_lan - configure VSI LAN queues
4120 * @pi: port information structure
4121 * @vsi_handle: software VSI handle
4122 * @tc_bitmap: TC bitmap
4123 * @max_lanqs: max LAN queues array per TC
4124 *
4125 * This function adds/updates the VSI LAN queues per TC.
4126 */
4127 enum ice_status
4128 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
4129 u16 *max_lanqs)
4130 {
4131 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
4132 ICE_SCHED_NODE_OWNER_LAN);
4133 }
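
/* Usage sketch (illustrative): request one LAN queue on every enabled TC.
 * ICE_MAX_TRAFFIC_CLASS bounds the per-TC array, matching the
 * ice_for_each_traffic_class() iteration used above.
 *
 *	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 *	enum ice_status status;
 *	u8 i;
 *
 *	ice_for_each_traffic_class(i)
 *		if (ice_is_tc_ena(tc_bitmap, i))
 *			max_lanqs[i] = 1;
 *	status = ice_cfg_vsi_lan(pi, vsi_handle, tc_bitmap, max_lanqs);
 */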
4134
4135 /**
4136 * ice_replay_pre_init - replay pre initialization
4137 * @hw: pointer to the HW struct
4138 *
4139 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
4140 */
4141 static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
4142 {
4143 struct ice_switch_info *sw = hw->switch_info;
4144 u8 i;
4145
4146 /* Delete old entries from replay filter list head if there is any */
4147 ice_rm_all_sw_replay_rule_info(hw);
4148 /* At the start of replay, move entries into the replay_rules list;
4149 * this allows rule entries to be added back to the filt_rules list,
4150 * which is the operational list.
4151 */
4152 for (i = 0; i < ICE_SW_LKUP_LAST; i++)
4153 list_replace_init(&sw->recp_list[i].filt_rules,
4154 &sw->recp_list[i].filt_replay_rules);
4155 ice_sched_replay_agg_vsi_preinit(hw);
4156
4157 return 0;
4158 }
4159
4160 /**
4161 * ice_replay_vsi - replay VSI configuration
4162 * @hw: pointer to the HW struct
4163 * @vsi_handle: driver VSI handle
4164 *
4165 * Restore all VSI configuration after reset. It is required to call this
4166 * function with main VSI first.
4167 */
4168 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
4169 {
4170 enum ice_status status;
4171
4172 if (!ice_is_vsi_valid(hw, vsi_handle))
4173 return ICE_ERR_PARAM;
4174
4175 /* Replay pre-initialization if there is any */
4176 if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
4177 status = ice_replay_pre_init(hw);
4178 if (status)
4179 return status;
4180 }
4181 /* Replay per VSI all RSS configurations */
4182 status = ice_replay_rss_cfg(hw, vsi_handle);
4183 if (status)
4184 return status;
4185 /* Replay per VSI all filters */
4186 status = ice_replay_vsi_all_fltr(hw, vsi_handle);
4187 if (!status)
4188 status = ice_replay_vsi_agg(hw, vsi_handle);
4189 return status;
4190 }
4191
4192 /**
4193 * ice_replay_post - post replay configuration cleanup
4194 * @hw: pointer to the HW struct
4195 *
4196 * Post replay cleanup.
4197 */
4198 void ice_replay_post(struct ice_hw *hw)
4199 {
4200 /* Delete old entries from replay filter list head */
4201 ice_rm_all_sw_replay_rule_info(hw);
4202 ice_sched_replay_agg(hw);
4203 }
4204
4205 /**
4206 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
4207 * @hw: ptr to the hardware info
4208 * @reg: offset of 64 bit HW register to read from
4209 * @prev_stat_loaded: bool to specify if previous stats are loaded
4210 * @prev_stat: ptr to previous loaded stat value
4211 * @cur_stat: ptr to current stat value
4212 */
4213 void
4214 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4215 u64 *prev_stat, u64 *cur_stat)
4216 {
4217 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
4218
4219 /* device stats are not reset at PFR; they likely will not be zeroed
4220 * when the driver starts. Thus, save the value from the first read
4221 * without adding to the statistic value so that we report stats which
4222 * count up from zero.
4223 */
4224 if (!prev_stat_loaded) {
4225 *prev_stat = new_data;
4226 return;
4227 }
4228
4229 /* Calculate the difference between the new and old values, and then
4230 * add it to the software stat value.
4231 */
4232 if (new_data >= *prev_stat)
4233 *cur_stat += new_data - *prev_stat;
4234 else
4235 /* to manage the potential roll-over */
4236 *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
4237
4238 /* Update the previously stored value to prepare for next read */
4239 *prev_stat = new_data;
4240 }
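
/* Worked example of the 40-bit rollover handling above: if *prev_stat is
 * 0xFFFFFFFFF0 (16 counts below the 2^40 wrap point) and the register now
 * reads 0x10, then new_data < *prev_stat and the stat is advanced by
 * (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20, i.e. the 32 events counted
 * across the wrap.
 */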
4241
4242 /**
4243 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
4244 * @hw: ptr to the hardware info
4245 * @reg: offset of HW register to read from
4246 * @prev_stat_loaded: bool to specify if previous stats are loaded
4247 * @prev_stat: ptr to previous loaded stat value
4248 * @cur_stat: ptr to current stat value
4249 */
4250 void
4251 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4252 u64 *prev_stat, u64 *cur_stat)
4253 {
4254 u32 new_data;
4255
4256 new_data = rd32(hw, reg);
4257
4258 /* device stats are not reset at PFR; they likely will not be zeroed
4259 * when the driver starts. Thus, save the value from the first read
4260 * without adding to the statistic value so that we report stats which
4261 * count up from zero.
4262 */
4263 if (!prev_stat_loaded) {
4264 *prev_stat = new_data;
4265 return;
4266 }
4267
4268 /* Calculate the difference between the new and old values, and then
4269 * add it to the software stat value.
4270 */
4271 if (new_data >= *prev_stat)
4272 *cur_stat += new_data - *prev_stat;
4273 else
4274 /* to manage the potential roll-over */
4275 *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
4276
4277 /* Update the previously stored value to prepare for next read */
4278 *prev_stat = new_data;
4279 }
4280
4281 /**
4282 * ice_sched_query_elem - query element information from HW
4283 * @hw: pointer to the HW struct
4284 * @node_teid: node TEID to be queried
4285 * @buf: buffer to element information
4286 *
4287 * This function queries HW element information
4288 */
4289 enum ice_status
4290 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
4291 struct ice_aqc_txsched_elem_data *buf)
4292 {
4293 u16 buf_size, num_elem_ret = 0;
4294 enum ice_status status;
4295
4296 buf_size = sizeof(*buf);
4297 memset(buf, 0, buf_size);
4298 buf->node_teid = cpu_to_le32(node_teid);
4299 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
4300 NULL);
4301 if (status || num_elem_ret != 1)
4302 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
4303 return status;
4304 }
4305
4306 /**
4307 * ice_fw_supports_link_override
4308 * @hw: pointer to the hardware structure
4309 *
4310 * Checks if the firmware supports link override
4311 */
4312 bool ice_fw_supports_link_override(struct ice_hw *hw)
4313 {
4314 if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
4315 if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
4316 return true;
4317 if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
4318 hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
4319 return true;
4320 } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
4321 return true;
4322 }
4323
4324 return false;
4325 }
4326
4327 /**
4328 * ice_get_link_default_override
4329 * @ldo: pointer to the link default override struct
4330 * @pi: pointer to the port info struct
4331 *
4332 * Gets the link default override for a port
4333 */
4334 enum ice_status
4335 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
4336 struct ice_port_info *pi)
4337 {
4338 u16 i, tlv, tlv_len, tlv_start, buf, offset;
4339 struct ice_hw *hw = pi->hw;
4340 enum ice_status status;
4341
4342 status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
4343 ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
4344 if (status) {
4345 ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
4346 return status;
4347 }
4348
4349 /* Each port has its own config; calculate for our port */
4350 tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
4351 ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
4352
4353 /* link options first */
4354 status = ice_read_sr_word(hw, tlv_start, &buf);
4355 if (status) {
4356 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
4357 return status;
4358 }
4359 ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
4360 ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
4361 ICE_LINK_OVERRIDE_PHY_CFG_S;
4362
4363 /* link PHY config */
4364 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
4365 status = ice_read_sr_word(hw, offset, &buf);
4366 if (status) {
4367 ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
4368 return status;
4369 }
4370 ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
4371
4372 /* PHY types low */
4373 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
4374 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
4375 status = ice_read_sr_word(hw, (offset + i), &buf);
4376 if (status) {
4377 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
4378 return status;
4379 }
4380 /* shift 16 bits at a time to fill 64 bits */
4381 ldo->phy_type_low |= ((u64)buf << (i * 16));
4382 }
4383
4384 /* PHY types high */
4385 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
4386 ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
4387 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
4388 status = ice_read_sr_word(hw, (offset + i), &buf);
4389 if (status) {
4390 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
4391 return status;
4392 }
4393 /* shift 16 bits at a time to fill 64 bits */
4394 ldo->phy_type_high |= ((u64)buf << (i * 16));
4395 }
4396
4397 return status;
4398 }
4399
4400 /**
4401 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
4402 * @caps: get PHY capability data
4403 */
4404 bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
4405 {
4406 if (caps->caps & ICE_AQC_PHY_AN_MODE ||
4407 caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
4408 ICE_AQC_PHY_AN_EN_CLAUSE73 |
4409 ICE_AQC_PHY_AN_EN_CLAUSE37))
4410 return true;
4411
4412 return false;
4413 }
4414
4415 /**
4416 * ice_aq_set_lldp_mib - Set the LLDP MIB
4417 * @hw: pointer to the HW struct
4418 * @mib_type: Local, Remote or both Local and Remote MIBs
4419 * @buf: pointer to the caller-supplied buffer to store the MIB block
4420 * @buf_size: size of the buffer (in bytes)
4421 * @cd: pointer to command details structure or NULL
4422 *
4423 * Set the LLDP MIB. (0x0A08)
4424 */
4425 enum ice_status
4426 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
4427 struct ice_sq_cd *cd)
4428 {
4429 struct ice_aqc_lldp_set_local_mib *cmd;
4430 struct ice_aq_desc desc;
4431
4432 cmd = &desc.params.lldp_set_mib;
4433
4434 if (buf_size == 0 || !buf)
4435 return ICE_ERR_PARAM;
4436
4437 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
4438
4439 desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
4440 desc.datalen = cpu_to_le16(buf_size);
4441
4442 cmd->type = mib_type;
4443 cmd->length = cpu_to_le16(buf_size);
4444
4445 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4446 }
4447
4448 /**
4449 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
4450 * @hw: pointer to HW struct
4451 */
4452 bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
4453 {
4454 if (hw->mac_type != ICE_MAC_E810)
4455 return false;
4456
4457 if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
4458 if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
4459 return true;
4460 if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
4461 hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
4462 return true;
4463 } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
4464 return true;
4465 }
4466 return false;
4467 }
4468
4469 /**
4470 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
4471 * @hw: pointer to HW struct
4472 * @vsi_num: absolute HW index for VSI
4473 * @add: boolean for if adding or removing a filter
4474 */
4475 enum ice_status
4476 ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
4477 {
4478 struct ice_aqc_lldp_filter_ctrl *cmd;
4479 struct ice_aq_desc desc;
4480
4481 cmd = &desc.params.lldp_filter_ctrl;
4482
4483 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
4484
4485 if (add)
4486 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
4487 else
4488 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
4489
4490 cmd->vsi_num = cpu_to_le16(vsi_num);
4491
4492 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4493 }
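
/* Usage sketch (illustrative): redirect LLDP frames to a VSI only when the
 * firmware supports the filter control command.
 *
 *	if (ice_fw_supports_lldp_fltr_ctrl(hw)) {
 *		enum ice_status status;
 *
 *		status = ice_lldp_fltr_add_remove(hw, vsi_num, true);
 *		if (status)
 *			ice_debug(hw, ICE_DBG_INIT, "LLDP filter add failed\n");
 *	}
 */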
4494
4495 /**
4496 * ice_fw_supports_report_dflt_cfg
4497 * @hw: pointer to the hardware structure
4498 *
4499 * Checks if the firmware supports report default configuration
4500 */
4501 bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
4502 {
4503 if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
4504 if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
4505 return true;
4506 if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
4507 hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
4508 return true;
4509 } else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
4510 return true;
4511 }
4512 return false;
4513 }
4514