1 /******************************************************************************
2
3 Copyright (c) 2001-2017, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35 #include "ixgbe_x550.h"
36 #include "ixgbe_x540.h"
37 #include "ixgbe_type.h"
38 #include "ixgbe_api.h"
39 #include "ixgbe_common.h"
40 #include "ixgbe_phy.h"
41
42 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed);
43 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
44 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
45 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw);
46
47 /**
48 * ixgbe_init_ops_X550 - Inits func ptrs and MAC type
49 * @hw: pointer to hardware structure
50 *
51 * Initialize the function pointers and assign the MAC type for X550.
52 * Does not touch the hardware.
53 **/
54 s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
55 {
56 struct ixgbe_mac_info *mac = &hw->mac;
57 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
58 s32 ret_val;
59
60 DEBUGFUNC("ixgbe_init_ops_X550");
61
62 ret_val = ixgbe_init_ops_X540(hw);
63 mac->ops.dmac_config = ixgbe_dmac_config_X550;
64 mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550;
65 mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550;
66 mac->ops.setup_eee = NULL;
67 mac->ops.set_source_address_pruning =
68 ixgbe_set_source_address_pruning_X550;
69 mac->ops.set_ethertype_anti_spoofing =
70 ixgbe_set_ethertype_anti_spoofing_X550;
71
72 mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
73 eeprom->ops.init_params = ixgbe_init_eeprom_params_X550;
74 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
75 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
76 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
77 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
78 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
79 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
80 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
81
82 mac->ops.disable_mdd = ixgbe_disable_mdd_X550;
83 mac->ops.enable_mdd = ixgbe_enable_mdd_X550;
84 mac->ops.mdd_event = ixgbe_mdd_event_X550;
85 mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550;
86 mac->ops.fw_recovery_mode = ixgbe_fw_recovery_mode_X550;
87 mac->ops.disable_rx = ixgbe_disable_rx_x550;
88 /* Manageability interface */
89 mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550;
90 switch (hw->device_id) {
91 case IXGBE_DEV_ID_X550EM_X_1G_T:
92 hw->mac.ops.led_on = NULL;
93 hw->mac.ops.led_off = NULL;
94 break;
95 case IXGBE_DEV_ID_X550EM_X_10G_T:
96 case IXGBE_DEV_ID_X550EM_A_10G_T:
97 hw->mac.ops.led_on = ixgbe_led_on_t_X550em;
98 hw->mac.ops.led_off = ixgbe_led_off_t_X550em;
99 break;
100 default:
101 break;
102 }
103 return ret_val;
104 }
105
106 /**
107 * ixgbe_read_cs4227 - Read CS4227 register
108 * @hw: pointer to hardware structure
109 * @reg: register number to read
110 * @value: pointer to receive value read
111 *
112 * Returns status code
113 **/
114 static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
115 {
116 return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value);
117 }
118
119 /**
120 * ixgbe_write_cs4227 - Write CS4227 register
121 * @hw: pointer to hardware structure
122 * @reg: register number to write
123 * @value: value to write to register
124 *
125 * Returns status code
126 **/
127 static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
128 {
129 return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value);
130 }
131
132 /**
133 * ixgbe_read_pe - Read register from port expander
134 * @hw: pointer to hardware structure
135 * @reg: register number to read
136 * @value: pointer to receive read value
137 *
138 * Returns status code
139 **/
140 static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
141 {
142 s32 status;
143
144 status = ixgbe_read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
145 if (status != IXGBE_SUCCESS)
146 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
147 "port expander access failed with %d\n", status);
148 return status;
149 }
150
151 /**
152 * ixgbe_write_pe - Write register to port expander
153 * @hw: pointer to hardware structure
154 * @reg: register number to write
155 * @value: value to write
156 *
157 * Returns status code
158 **/
159 static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
160 {
161 s32 status;
162
163 status = ixgbe_write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
164 if (status != IXGBE_SUCCESS)
165 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
166 "port expander access failed with %d\n", status);
167 return status;
168 }
169
170 /**
171 * ixgbe_reset_cs4227 - Reset CS4227 using port expander
172 * @hw: pointer to hardware structure
173 *
174 * This function assumes that the caller has acquired the proper semaphore.
175 * Returns error code
176 **/
177 static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
178 {
179 s32 status;
180 u32 retry;
181 u16 value;
182 u8 reg;
183
184 /* Trigger hard reset. */
185 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
186 if (status != IXGBE_SUCCESS)
187 return status;
188 reg |= IXGBE_PE_BIT1;
189 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
190 if (status != IXGBE_SUCCESS)
191 return status;
192
193 status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg);
194 if (status != IXGBE_SUCCESS)
195 return status;
196 reg &= ~IXGBE_PE_BIT1;
197 status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg);
198 if (status != IXGBE_SUCCESS)
199 return status;
200
201 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
202 if (status != IXGBE_SUCCESS)
203 return status;
204 reg &= ~IXGBE_PE_BIT1;
205 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
206 if (status != IXGBE_SUCCESS)
207 return status;
208
209 usec_delay(IXGBE_CS4227_RESET_HOLD);
210
211 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
212 if (status != IXGBE_SUCCESS)
213 return status;
214 reg |= IXGBE_PE_BIT1;
215 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
216 if (status != IXGBE_SUCCESS)
217 return status;
218
219 /* Wait for the reset to complete. */
220 msec_delay(IXGBE_CS4227_RESET_DELAY);
221 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
222 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS,
223 &value);
224 if (status == IXGBE_SUCCESS &&
225 value == IXGBE_CS4227_EEPROM_LOAD_OK)
226 break;
227 msec_delay(IXGBE_CS4227_CHECK_DELAY);
228 }
229 if (retry == IXGBE_CS4227_RETRIES) {
230 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
231 "CS4227 reset did not complete.");
232 return IXGBE_ERR_PHY;
233 }
234
235 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
236 if (status != IXGBE_SUCCESS ||
237 !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
238 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
239 "CS4227 EEPROM did not load successfully.");
240 return IXGBE_ERR_PHY;
241 }
242
243 return IXGBE_SUCCESS;
244 }
245
246 /**
247 * ixgbe_check_cs4227 - Check CS4227 and reset as needed
248 * @hw: pointer to hardware structure
249 **/
250 static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
251 {
252 s32 status = IXGBE_SUCCESS;
253 u32 swfw_mask = hw->phy.phy_semaphore_mask;
254 u16 value = 0;
255 u8 retry;
256
257 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
258 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
259 if (status != IXGBE_SUCCESS) {
260 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
261 "semaphore failed with %d", status);
262 msec_delay(IXGBE_CS4227_CHECK_DELAY);
263 continue;
264 }
265
266 /* Get status of reset flow. */
267 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
268
269 if (status == IXGBE_SUCCESS &&
270 value == IXGBE_CS4227_RESET_COMPLETE)
271 goto out;
272
273 if (status != IXGBE_SUCCESS ||
274 value != IXGBE_CS4227_RESET_PENDING)
275 break;
276
277 /* Reset is pending. Wait and check again. */
278 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
279 msec_delay(IXGBE_CS4227_CHECK_DELAY);
280 }
281
282 /* If still pending, assume other instance failed. */
283 if (retry == IXGBE_CS4227_RETRIES) {
284 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
285 if (status != IXGBE_SUCCESS) {
286 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
287 "semaphore failed with %d", status);
288 return;
289 }
290 }
291
292 /* Reset the CS4227. */
293 status = ixgbe_reset_cs4227(hw);
294 if (status != IXGBE_SUCCESS) {
295 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
296 "CS4227 reset failed: %d", status);
297 goto out;
298 }
299
300 /* Reset takes so long, temporarily release semaphore in case the
301 * other driver instance is waiting for the reset indication.
302 */
303 ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
304 IXGBE_CS4227_RESET_PENDING);
305 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
306 msec_delay(10);
307 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
308 if (status != IXGBE_SUCCESS) {
309 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
310 "semaphore failed with %d", status);
311 return;
312 }
313
314 /* Record completion for next time. */
315 status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
316 IXGBE_CS4227_RESET_COMPLETE);
317
318 out:
319 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
320 msec_delay(hw->eeprom.semaphore_delay);
321 }
322
323 /**
324 * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
325 * @hw: pointer to hardware structure
326 **/
327 static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
328 {
329 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
330
331 if (hw->bus.lan_id) {
332 esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
333 esdp |= IXGBE_ESDP_SDP1_DIR;
334 }
335 esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
336 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
337 IXGBE_WRITE_FLUSH(hw);
338 }
339
340 /**
341 * ixgbe_identify_phy_x550em - Get PHY type based on device id
342 * @hw: pointer to hardware structure
343 *
344 * Returns error code
345 */
346 static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
347 {
348 hw->mac.ops.set_lan_id(hw);
349
350 ixgbe_read_mng_if_sel_x550em(hw);
351
352 switch (hw->device_id) {
353 case IXGBE_DEV_ID_X550EM_A_SFP:
354 return ixgbe_identify_sfp_module_X550em(hw);
355 case IXGBE_DEV_ID_X550EM_X_SFP:
356 /* set up for CS4227 usage */
357 ixgbe_setup_mux_ctl(hw);
358 ixgbe_check_cs4227(hw);
359 /* Fallthrough */
360
361 case IXGBE_DEV_ID_X550EM_A_SFP_N:
362 return ixgbe_identify_sfp_module_X550em(hw);
363 break;
364 case IXGBE_DEV_ID_X550EM_X_KX4:
365 hw->phy.type = ixgbe_phy_x550em_kx4;
366 break;
367 case IXGBE_DEV_ID_X550EM_X_XFI:
368 hw->phy.type = ixgbe_phy_x550em_xfi;
369 break;
370 case IXGBE_DEV_ID_X550EM_X_KR:
371 case IXGBE_DEV_ID_X550EM_A_KR:
372 case IXGBE_DEV_ID_X550EM_A_KR_L:
373 hw->phy.type = ixgbe_phy_x550em_kr;
374 break;
375 case IXGBE_DEV_ID_X550EM_A_10G_T:
376 case IXGBE_DEV_ID_X550EM_X_10G_T:
377 return ixgbe_identify_phy_generic(hw);
378 case IXGBE_DEV_ID_X550EM_X_1G_T:
379 hw->phy.type = ixgbe_phy_ext_1g_t;
380 break;
381 case IXGBE_DEV_ID_X550EM_A_1G_T:
382 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
383 hw->phy.type = ixgbe_phy_fw;
384 if (hw->bus.lan_id)
385 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
386 else
387 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
388 break;
389 default:
390 break;
391 }
392 return IXGBE_SUCCESS;
393 }
394
395 /**
396 * ixgbe_fw_phy_activity - Perform an activity on a PHY
397 * @hw: pointer to hardware structure
398 * @activity: activity to perform
399 * @data: Pointer to 4 32-bit words of data
400 */
401 s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
402 u32 (*data)[FW_PHY_ACT_DATA_COUNT])
403 {
404 union {
405 struct ixgbe_hic_phy_activity_req cmd;
406 struct ixgbe_hic_phy_activity_resp rsp;
407 } hic;
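/* The request and response overlay the same buffer: when the last argument
 * to ixgbe_host_interface_command() is TRUE, the completed response is read
 * back in place over the command.
 */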
408 u16 retries = FW_PHY_ACT_RETRIES;
409 s32 rc;
410 u16 i;
411
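/* Retry the activity until the firmware reports success or the
 * FW_PHY_ACT_RETRIES budget is exhausted, backing off briefly between
 * attempts.
 */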
412 do {
413 memset(&hic, 0, sizeof(hic));
414 hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD;
415 hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN;
416 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
417 hic.cmd.port_number = hw->bus.lan_id;
418 hic.cmd.activity_id = IXGBE_CPU_TO_LE16(activity);
419 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
420 hic.cmd.data[i] = IXGBE_CPU_TO_BE32((*data)[i]);
421
422 rc = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
423 sizeof(hic.cmd),
424 IXGBE_HI_COMMAND_TIMEOUT,
425 TRUE);
426 if (rc != IXGBE_SUCCESS)
427 return rc;
428 if (hic.rsp.hdr.cmd_or_resp.ret_status ==
429 FW_CEM_RESP_STATUS_SUCCESS) {
430 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
431 (*data)[i] = IXGBE_BE32_TO_CPU(hic.rsp.data[i]);
432 return IXGBE_SUCCESS;
433 }
434 usec_delay(20);
435 --retries;
436 } while (retries > 0);
437
438 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
439 }
440
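/* Map of firmware link-speed bits to the driver's IXGBE_LINK_SPEED_* flags;
 * scanned linearly by ixgbe_get_phy_id_fw() and ixgbe_setup_fw_link() below.
 */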
441 static const struct {
442 u16 fw_speed;
443 ixgbe_link_speed phy_speed;
444 } ixgbe_fw_map[] = {
445 { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL },
446 { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL },
447 { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL },
448 { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL },
449 { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL },
450 { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL },
451 };
452
453 /**
454 * ixgbe_get_phy_id_fw - Get the phy ID via firmware command
455 * @hw: pointer to hardware structure
456 *
457 * Returns error code
458 */
459 static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
460 {
461 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
462 u16 phy_speeds;
463 u16 phy_id_lo;
464 s32 rc;
465 u16 i;
466
467 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
468 if (rc)
469 return rc;
470
471 hw->phy.speeds_supported = 0;
472 phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK;
473 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
474 if (phy_speeds & ixgbe_fw_map[i].fw_speed)
475 hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed;
476 }
477 if (!hw->phy.autoneg_advertised)
478 hw->phy.autoneg_advertised = hw->phy.speeds_supported;
479
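/* info[0] carries the upper PHY ID bits; info[1] packs the low ID bits
 * together with the revision, which IXGBE_PHY_REVISION_MASK separates below.
 */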
480 hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK;
481 phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK;
482 hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
483 hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
484 if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
485 return IXGBE_ERR_PHY_ADDR_INVALID;
486 return IXGBE_SUCCESS;
487 }
488
489 /**
490 * ixgbe_identify_phy_fw - Get PHY type based on firmware command
491 * @hw: pointer to hardware structure
492 *
493 * Returns error code
494 */
495 static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
496 {
497 if (hw->bus.lan_id)
498 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
499 else
500 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
501
502 hw->phy.type = ixgbe_phy_fw;
503 hw->phy.ops.read_reg = NULL;
504 hw->phy.ops.write_reg = NULL;
505 return ixgbe_get_phy_id_fw(hw);
506 }
507
508 /**
509 * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY
510 * @hw: pointer to hardware structure
511 *
512 * Returns error code
513 */
514 s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
515 {
516 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
517
518 setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF;
519 return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup);
520 }
521
522 static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
523 u32 device_type, u16 *phy_data)
524 {
525 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data);
526 return IXGBE_NOT_IMPLEMENTED;
527 }
528
529 static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
530 u32 device_type, u16 phy_data)
531 {
532 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data);
533 return IXGBE_NOT_IMPLEMENTED;
534 }
535
536 /**
537 * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
538 * @hw: pointer to the hardware structure
539 * @addr: I2C bus address to read from
540 * @reg: I2C device register to read from
541 * @val: pointer to location to receive read value
542 *
543 * Returns an error code on error.
544 **/
545 static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
546 u16 reg, u16 *val)
547 {
548 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, TRUE);
549 }
550
551 /**
552 * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation
553 * @hw: pointer to the hardware structure
554 * @addr: I2C bus address to read from
555 * @reg: I2C device register to read from
556 * @val: pointer to location to receive read value
557 *
558 * Returns an error code on error.
559 **/
560 static s32
561 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
562 u16 reg, u16 *val)
563 {
564 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, FALSE);
565 }
566
567 /**
568 * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
569 * @hw: pointer to the hardware structure
570 * @addr: I2C bus address to write to
571 * @reg: I2C device register to write to
572 * @val: value to write
573 *
574 * Returns an error code on error.
575 **/
576 static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
577 u8 addr, u16 reg, u16 val)
578 {
579 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, TRUE);
580 }
581
582 /**
583 * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation
584 * @hw: pointer to the hardware structure
585 * @addr: I2C bus address to write to
586 * @reg: I2C device register to write to
587 * @val: value to write
588 *
589 * Returns an error code on error.
590 **/
591 static s32
592 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
593 u8 addr, u16 reg, u16 val)
594 {
595 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, FALSE);
596 }
597
598 /**
599 * ixgbe_init_ops_X550EM - Inits func ptrs and MAC type
600 * @hw: pointer to hardware structure
601 *
602 * Initialize the function pointers and assign the MAC type for X550EM.
603 * Does not touch the hardware.
604 **/
605 s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
606 {
607 struct ixgbe_mac_info *mac = &hw->mac;
608 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
609 struct ixgbe_phy_info *phy = &hw->phy;
610 s32 ret_val;
611
612 DEBUGFUNC("ixgbe_init_ops_X550EM");
613
614 /* Similar to X550 so start there. */
615 ret_val = ixgbe_init_ops_X550(hw);
616
617 /* Since this function eventually calls
618 * ixgbe_init_ops_X540 by design, we are setting
619 * the pointers to NULL explicitly here to overwrite
620 * the values being set in the x540 function.
621 */
622
623 /* Bypass not supported in x550EM */
624 mac->ops.bypass_rw = NULL;
625 mac->ops.bypass_valid_rd = NULL;
626 mac->ops.bypass_set = NULL;
627 mac->ops.bypass_rd_eep = NULL;
628
629 /* FCOE not supported in x550EM */
630 mac->ops.get_san_mac_addr = NULL;
631 mac->ops.set_san_mac_addr = NULL;
632 mac->ops.get_wwn_prefix = NULL;
633 mac->ops.get_fcoe_boot_status = NULL;
634
635 /* IPsec not supported in x550EM */
636 mac->ops.disable_sec_rx_path = NULL;
637 mac->ops.enable_sec_rx_path = NULL;
638
639 /* AUTOC register is not present in x550EM. */
640 mac->ops.prot_autoc_read = NULL;
641 mac->ops.prot_autoc_write = NULL;
642
643 /* X550EM bus type is internal */
644 hw->bus.type = ixgbe_bus_type_internal;
645 mac->ops.get_bus_info = ixgbe_get_bus_info_X550em;
646
647
648 mac->ops.get_media_type = ixgbe_get_media_type_X550em;
649 mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em;
650 mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em;
651 mac->ops.reset_hw = ixgbe_reset_hw_X550em;
652 mac->ops.get_supported_physical_layer =
653 ixgbe_get_supported_physical_layer_X550em;
654
655 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
656 mac->ops.setup_fc = ixgbe_setup_fc_generic;
657 else
658 mac->ops.setup_fc = ixgbe_setup_fc_X550em;
659
660 /* PHY */
661 phy->ops.init = ixgbe_init_phy_ops_X550em;
662 switch (hw->device_id) {
663 case IXGBE_DEV_ID_X550EM_A_1G_T:
664 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
665 mac->ops.setup_fc = NULL;
666 phy->ops.identify = ixgbe_identify_phy_fw;
667 phy->ops.set_phy_power = NULL;
668 phy->ops.get_firmware_version = NULL;
669 break;
670 case IXGBE_DEV_ID_X550EM_X_1G_T:
671 mac->ops.setup_fc = NULL;
672 phy->ops.identify = ixgbe_identify_phy_x550em;
673 phy->ops.set_phy_power = NULL;
674 break;
675 default:
676 phy->ops.identify = ixgbe_identify_phy_x550em;
677 }
678
679 if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
680 phy->ops.set_phy_power = NULL;
681
682
683 /* EEPROM */
684 eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
685 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
686 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
687 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
688 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
689 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
690 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
691 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
692
693 return ret_val;
694 }
695
696 /**
697 * ixgbe_setup_fw_link - Setup firmware-controlled PHYs
698 * @hw: pointer to hardware structure
699 */
700 static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
701 {
702 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
703 s32 rc;
704 u16 i;
705
706 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
707 return 0;
708
709 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
710 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
711 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
712 return IXGBE_ERR_INVALID_LINK_SETTINGS;
713 }
714
715 switch (hw->fc.requested_mode) {
716 case ixgbe_fc_full:
717 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
718 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
719 break;
720 case ixgbe_fc_rx_pause:
721 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX <<
722 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
723 break;
724 case ixgbe_fc_tx_pause:
725 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX <<
726 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
727 break;
728 default:
729 break;
730 }
731
732 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
733 if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed)
734 setup[0] |= ixgbe_fw_map[i].fw_speed;
735 }
736 setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;
737
738 if (hw->phy.eee_speeds_advertised)
739 setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE;
740
741 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
742 if (rc)
743 return rc;
744 if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
745 return IXGBE_ERR_OVERTEMP;
746 return IXGBE_SUCCESS;
747 }
748
749 /**
750 * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs
751 * @hw: pointer to hardware structure
752 *
753 * Called at init time to set up flow control.
754 */
755 static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
756 {
757 if (hw->fc.requested_mode == ixgbe_fc_default)
758 hw->fc.requested_mode = ixgbe_fc_full;
759
760 return ixgbe_setup_fw_link(hw);
761 }
762
763 /**
764 * ixgbe_setup_eee_fw - Enable/disable EEE support
765 * @hw: pointer to the HW structure
766 * @enable_eee: boolean flag to enable EEE
767 *
768 * Enable/disable EEE based on enable_eee flag.
769 * This function controls EEE for firmware-based PHY implementations.
770 */
771 static s32 ixgbe_setup_eee_fw(struct ixgbe_hw *hw, bool enable_eee)
772 {
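/* If the requested EEE state already matches what is advertised there is
 * nothing to push to the PHY; otherwise update the advertised EEE speeds
 * and re-run link setup.
 */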
773 if (!!hw->phy.eee_speeds_advertised == enable_eee)
774 return IXGBE_SUCCESS;
775 if (enable_eee)
776 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
777 else
778 hw->phy.eee_speeds_advertised = 0;
779 return hw->phy.ops.setup_link(hw);
780 }
781
782 /**
783 * ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type
784 * @hw: pointer to hardware structure
785 *
786 * Initialize the function pointers and assign the MAC type for X550EM_a.
787 * Does not touch the hardware.
788 **/
789 s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw)
790 {
791 struct ixgbe_mac_info *mac = &hw->mac;
792 s32 ret_val;
793
794 DEBUGFUNC("ixgbe_init_ops_X550EM_a");
795
796 /* Start with generic X550EM init */
797 ret_val = ixgbe_init_ops_X550EM(hw);
798
799 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
800 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) {
801 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
802 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
803 } else {
804 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a;
805 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a;
806 }
807 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a;
808 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a;
809
810 switch (mac->ops.get_media_type(hw)) {
811 case ixgbe_media_type_fiber:
812 mac->ops.setup_fc = NULL;
813 mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a;
814 break;
815 case ixgbe_media_type_backplane:
816 mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a;
817 mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a;
818 break;
819 default:
820 break;
821 }
822
823 switch (hw->device_id) {
824 case IXGBE_DEV_ID_X550EM_A_1G_T:
825 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
826 mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a;
827 mac->ops.setup_fc = ixgbe_fc_autoneg_fw;
828 mac->ops.setup_eee = ixgbe_setup_eee_fw;
829 hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
830 IXGBE_LINK_SPEED_1GB_FULL;
831 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
832 break;
833 default:
834 break;
835 }
836
837 return ret_val;
838 }
839
840 /**
841 * ixgbe_init_ops_X550EM_x - Inits func ptrs and MAC type
842 * @hw: pointer to hardware structure
843 *
844 * Initialize the function pointers and assign the MAC type for X550EM_x.
845 * Does not touch the hardware.
846 **/
847 s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw)
848 {
849 struct ixgbe_mac_info *mac = &hw->mac;
850 struct ixgbe_link_info *link = &hw->link;
851 s32 ret_val;
852
853 DEBUGFUNC("ixgbe_init_ops_X550EM_x");
854
855 /* Start with generic X550EM init */
856 ret_val = ixgbe_init_ops_X550EM(hw);
857
858 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
859 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
860 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em;
861 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em;
862 link->ops.read_link = ixgbe_read_i2c_combined_generic;
863 link->ops.read_link_unlocked = ixgbe_read_i2c_combined_generic_unlocked;
864 link->ops.write_link = ixgbe_write_i2c_combined_generic;
865 link->ops.write_link_unlocked =
866 ixgbe_write_i2c_combined_generic_unlocked;
867 link->addr = IXGBE_CS4227;
868
869 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) {
870 mac->ops.setup_fc = NULL;
871 mac->ops.setup_eee = NULL;
872 mac->ops.init_led_link_act = NULL;
873 }
874
875 return ret_val;
876 }
877
878 /**
879 * ixgbe_dmac_config_X550
880 * @hw: pointer to hardware structure
881 *
882 * Configure DMA coalescing. If enabling dmac, dmac is activated.
883 * When disabling dmac, the dmac enable bit is cleared.
884 **/
885 s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw)
886 {
887 u32 reg, high_pri_tc;
888
889 DEBUGFUNC("ixgbe_dmac_config_X550");
890
891 /* Disable DMA coalescing before configuring */
892 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
893 reg &= ~IXGBE_DMACR_DMAC_EN;
894 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
895
896 /* Disable DMA Coalescing if the watchdog timer is 0 */
897 if (!hw->mac.dmac_config.watchdog_timer)
898 goto out;
899
900 ixgbe_dmac_config_tcs_X550(hw);
901
902 /* Configure DMA Coalescing Control Register */
903 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
904
905 /* Set the watchdog timer in units of 40.96 usec */
906 reg &= ~IXGBE_DMACR_DMACWT_MASK;
907 reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096;
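/* Assuming the configured watchdog value is in microseconds,
 * (value * 100) / 4096 is equivalent to dividing by 40.96 (with integer
 * truncation) to obtain units of 40.96 usec.
 */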
908
909 reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK;
910 /* If fcoe is enabled, set high priority traffic class */
911 if (hw->mac.dmac_config.fcoe_en) {
912 high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc;
913 reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) &
914 IXGBE_DMACR_HIGH_PRI_TC_MASK);
915 }
916 reg |= IXGBE_DMACR_EN_MNG_IND;
917
918 /* Enable DMA coalescing after configuration */
919 reg |= IXGBE_DMACR_DMAC_EN;
920 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
921
922 out:
923 return IXGBE_SUCCESS;
924 }
925
926 /**
927 * ixgbe_dmac_config_tcs_X550
928 * @hw: pointer to hardware structure
929 *
930 * Configure DMA coalescing threshold per TC. The dmac enable bit must
931 * be cleared before configuring.
932 **/
933 s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw)
934 {
935 u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb;
936
937 DEBUGFUNC("ixgbe_dmac_config_tcs_X550");
938
939 /* Select the Rx packet buffer headroom based on link speed */
940 switch (hw->mac.dmac_config.link_speed) {
941 case IXGBE_LINK_SPEED_10_FULL:
942 case IXGBE_LINK_SPEED_100_FULL:
943 pb_headroom = IXGBE_DMACRXT_100M;
944 break;
945 case IXGBE_LINK_SPEED_1GB_FULL:
946 pb_headroom = IXGBE_DMACRXT_1G;
947 break;
948 default:
949 pb_headroom = IXGBE_DMACRXT_10G;
950 break;
951 }
952
953 maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >>
954 IXGBE_MHADD_MFS_SHIFT) / 1024);
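/* MAXFRS holds the maximum frame size in bytes in its upper field;
 * dividing by 1024 converts it to kilobytes for comparison with the
 * per-TC receive thresholds below.
 */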
955
956 /* Set the per Rx packet buffer receive threshold */
957 for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
958 reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc));
959 reg &= ~IXGBE_DMCTH_DMACRXT_MASK;
960
961 if (tc < hw->mac.dmac_config.num_tcs) {
962 /* Get Rx PB size */
963 rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc));
964 rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >>
965 IXGBE_RXPBSIZE_SHIFT;
966
967 /* Calculate receive buffer threshold in kilobytes */
968 if (rx_pb_size > pb_headroom)
969 rx_pb_size = rx_pb_size - pb_headroom;
970 else
971 rx_pb_size = 0;
972
973 /* Minimum of MFS shall be set for DMCTH */
974 reg |= (rx_pb_size > maxframe_size_kb) ?
975 rx_pb_size : maxframe_size_kb;
976 }
977 IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg);
978 }
979 return IXGBE_SUCCESS;
980 }
981
982 /**
983 * ixgbe_dmac_update_tcs_X550
984 * @hw: pointer to hardware structure
985 *
986 * Disables dmac, updates per TC settings, and then enables dmac.
987 **/
988 s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw)
989 {
990 u32 reg;
991
992 DEBUGFUNC("ixgbe_dmac_update_tcs_X550");
993
994 /* Disable DMA coalescing before configuring */
995 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
996 reg &= ~IXGBE_DMACR_DMAC_EN;
997 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
998
999 ixgbe_dmac_config_tcs_X550(hw);
1000
1001 /* Enable DMA coalescing after configuration */
1002 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1003 reg |= IXGBE_DMACR_DMAC_EN;
1004 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1005
1006 return IXGBE_SUCCESS;
1007 }
1008
1009 /**
1010 * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
1011 * @hw: pointer to hardware structure
1012 *
1013 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1014 * ixgbe_hw struct in order to set up EEPROM access.
1015 **/
1016 s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
1017 {
1018 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1019 u32 eec;
1020 u16 eeprom_size;
1021
1022 DEBUGFUNC("ixgbe_init_eeprom_params_X550");
1023
1024 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1025 eeprom->semaphore_delay = 10;
1026 eeprom->type = ixgbe_flash;
1027
1028 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1029 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1030 IXGBE_EEC_SIZE_SHIFT);
1031 eeprom->word_size = 1 << (eeprom_size +
1032 IXGBE_EEPROM_WORD_SIZE_SHIFT);
1033
1034 DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
1035 eeprom->type, eeprom->word_size);
1036 }
1037
1038 return IXGBE_SUCCESS;
1039 }
1040
1041 /**
1042 * ixgbe_set_source_address_pruning_X550 - Enable/Disable source address pruning
1043 * @hw: pointer to hardware structure
1044 * @enable: enable or disable source address pruning
1045 * @pool: Rx pool to set source address pruning for
1046 **/
1047 void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
1048 unsigned int pool)
1049 {
1050 u64 pfflp;
1051
1052 /* max rx pool is 63 */
1053 if (pool > 63)
1054 return;
1055
1056 pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
1057 pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
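/* PFFLPL and PFFLPH together form a 64-bit bitmap with one source
 * address pruning enable bit per Rx pool.
 */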
1058
1059 if (enable)
1060 pfflp |= (1ULL << pool);
1061 else
1062 pfflp &= ~(1ULL << pool);
1063
1064 IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
1065 IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
1066 }
1067
1068 /**
1069 * ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype anti-spoofing
1070 * @hw: pointer to hardware structure
1071 * @enable: enable or disable switch for Ethertype anti-spoofing
1072 * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
1073 *
1074 **/
1075 void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
1076 bool enable, int vf)
1077 {
1078 int vf_target_reg = vf >> 3;
1079 int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
1080 u32 pfvfspoof;
1081
1082 DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550");
1083
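/* Each PFVFSPOOF register covers eight VF pools, so vf >> 3 selects the
 * register and vf % 8 selects the bit within the Ethertype anti-spoofing
 * field.
 */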
1084 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
1085 if (enable)
1086 pfvfspoof |= (1 << vf_target_shift);
1087 else
1088 pfvfspoof &= ~(1 << vf_target_shift);
1089
1090 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
1091 }
1092
1093 /**
1094 * ixgbe_iosf_wait - Wait for IOSF command completion
1095 * @hw: pointer to hardware structure
1096 * @ctrl: pointer to location to receive final IOSF control value
1097 *
1098 * Returns failing status on timeout
1099 *
1100 * Note: ctrl can be NULL if the IOSF control register value is not needed
1101 **/
1102 static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
1103 {
1104 u32 i, command = 0;
1105
1106 /* Check every 10 usec to see if the address cycle completed.
1107 * The SB IOSF BUSY bit will clear when the operation is
1108 * complete
1109 */
1110 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
1111 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
1112 if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
1113 break;
1114 usec_delay(10);
1115 }
1116 if (ctrl)
1117 *ctrl = command;
1118 if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
1119 ERROR_REPORT1(IXGBE_ERROR_POLLING, "Wait timed out\n");
1120 return IXGBE_ERR_PHY;
1121 }
1122
1123 return IXGBE_SUCCESS;
1124 }
1125
1126 /**
1127 * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register
1128 * of the IOSF device
1129 * @hw: pointer to hardware structure
1130 * @reg_addr: 32 bit PHY register to write
1131 * @device_type: 3 bit device type
1132 * @data: Data to write to the register
1133 **/
1134 s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1135 u32 device_type, u32 data)
1136 {
1137 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1138 u32 command, error;
1139 s32 ret;
1140
1141 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1142 if (ret != IXGBE_SUCCESS)
1143 return ret;
1144
1145 ret = ixgbe_iosf_wait(hw, NULL);
1146 if (ret != IXGBE_SUCCESS)
1147 goto out;
1148
1149 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1150 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1151
1152 /* Write IOSF control register */
1153 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1154
1155 /* Write IOSF data register */
1156 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
1157
1158 ret = ixgbe_iosf_wait(hw, &command);
1159
1160 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1161 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1162 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1163 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1164 "Failed to write, error %x\n", error);
1165 ret = IXGBE_ERR_PHY;
1166 }
1167
1168 out:
1169 ixgbe_release_swfw_semaphore(hw, gssr);
1170 return ret;
1171 }
1172
1173 /**
1174 * ixgbe_read_iosf_sb_reg_x550 - Reads specified register of the IOSF device
1175 * @hw: pointer to hardware structure
1176 * @reg_addr: 32 bit PHY register to read
1177 * @device_type: 3 bit device type
1178 * @data: Pointer to read data from the register
1179 **/
1180 s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1181 u32 device_type, u32 *data)
1182 {
1183 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1184 u32 command, error;
1185 s32 ret;
1186
1187 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1188 if (ret != IXGBE_SUCCESS)
1189 return ret;
1190
1191 ret = ixgbe_iosf_wait(hw, NULL);
1192 if (ret != IXGBE_SUCCESS)
1193 goto out;
1194
1195 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1196 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1197
1198 /* Write IOSF control register */
1199 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1200
1201 ret = ixgbe_iosf_wait(hw, &command);
1202
1203 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1204 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1205 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1206 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1207 "Failed to read, error %x\n", error);
1208 ret = IXGBE_ERR_PHY;
1209 }
1210
1211 if (ret == IXGBE_SUCCESS)
1212 *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
1213
1214 out:
1215 ixgbe_release_swfw_semaphore(hw, gssr);
1216 return ret;
1217 }
1218
1219 /**
1220 * ixgbe_get_phy_token - Get the token for shared phy access
1221 * @hw: Pointer to hardware structure
1222 */
1223
1224 s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
1225 {
1226 struct ixgbe_hic_phy_token_req token_cmd;
1227 s32 status;
1228
1229 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1230 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1231 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1232 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1233 token_cmd.port_number = hw->bus.lan_id;
1234 token_cmd.command_type = FW_PHY_TOKEN_REQ;
1235 token_cmd.pad = 0;
1236 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1237 sizeof(token_cmd),
1238 IXGBE_HI_COMMAND_TIMEOUT,
1239 TRUE);
1240 if (status) {
1241 DEBUGOUT1("Issuing host interface command failed with Status = %d\n",
1242 status);
1243 return status;
1244 }
1245 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1246 return IXGBE_SUCCESS;
1247 if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) {
1248 DEBUGOUT1("Host interface command returned 0x%08x , returning IXGBE_ERR_FW_RESP_INVALID\n",
1249 token_cmd.hdr.cmd_or_resp.ret_status);
1250 return IXGBE_ERR_FW_RESP_INVALID;
1251 }
1252
1253 DEBUGOUT("Returning IXGBE_ERR_TOKEN_RETRY\n");
1254 return IXGBE_ERR_TOKEN_RETRY;
1255 }
1256
1257 /**
1258 * ixgbe_put_phy_token - Put the token for shared phy access
1259 * @hw: Pointer to hardware structure
1260 */
1261
1262 s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
1263 {
1264 struct ixgbe_hic_phy_token_req token_cmd;
1265 s32 status;
1266
1267 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1268 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1269 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1270 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1271 token_cmd.port_number = hw->bus.lan_id;
1272 token_cmd.command_type = FW_PHY_TOKEN_REL;
1273 token_cmd.pad = 0;
1274 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1275 sizeof(token_cmd),
1276 IXGBE_HI_COMMAND_TIMEOUT,
1277 TRUE);
1278 if (status)
1279 return status;
1280 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1281 return IXGBE_SUCCESS;
1282
1283 DEBUGOUT("Put PHY Token host interface command failed");
1284 return IXGBE_ERR_FW_RESP_INVALID;
1285 }
1286
1287 /**
1288 * ixgbe_write_iosf_sb_reg_x550a - Writes a value to specified register
1289 * of the IOSF device
1290 * @hw: pointer to hardware structure
1291 * @reg_addr: 32 bit PHY register to write
1292 * @device_type: 3 bit device type
1293 * @data: Data to write to the register
1294 **/
1295 s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
1296 u32 device_type, u32 data)
1297 {
1298 struct ixgbe_hic_internal_phy_req write_cmd;
1299 s32 status;
1300 UNREFERENCED_1PARAMETER(device_type);
1301
1302 memset(&write_cmd, 0, sizeof(write_cmd));
1303 write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
1304 write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
1305 write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1306 write_cmd.port_number = hw->bus.lan_id;
1307 write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
1308 write_cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
1309 write_cmd.write_data = IXGBE_CPU_TO_BE32(data);
1310
1311 status = ixgbe_host_interface_command(hw, (u32 *)&write_cmd,
1312 sizeof(write_cmd),
1313 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
1314
1315 return status;
1316 }
1317
1318 /**
1319 * ixgbe_read_iosf_sb_reg_x550a - Reads specified register of the IOSF device
1320 * @hw: pointer to hardware structure
1321 * @reg_addr: 32 bit PHY register to read
1322 * @device_type: 3 bit device type
1323 * @data: Pointer to read data from the register
1324 **/
1325 s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
1326 u32 device_type, u32 *data)
1327 {
1328 union {
1329 struct ixgbe_hic_internal_phy_req cmd;
1330 struct ixgbe_hic_internal_phy_resp rsp;
1331 } hic;
1332 s32 status;
1333 UNREFERENCED_1PARAMETER(device_type);
1334
1335 memset(&hic, 0, sizeof(hic));
1336 hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
1337 hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
1338 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1339 hic.cmd.port_number = hw->bus.lan_id;
1340 hic.cmd.command_type = FW_INT_PHY_REQ_READ;
1341 hic.cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
1342
1343 status = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
1344 sizeof(hic.cmd),
1345 IXGBE_HI_COMMAND_TIMEOUT, TRUE);
1346
1347 /* Extract the register value from the response. */
1348 *data = IXGBE_BE32_TO_CPU(hic.rsp.read_data);
1349
1350 return status;
1351 }
1352
1353 /**
1354 * ixgbe_disable_mdd_X550
1355 * @hw: pointer to hardware structure
1356 *
1357 * Disable malicious driver detection
1358 **/
1359 void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw)
1360 {
1361 u32 reg;
1362
1363 DEBUGFUNC("ixgbe_disable_mdd_X550");
1364
1365 /* Disable MDD for TX DMA and interrupt */
1366 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1367 reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1368 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1369
1370 /* Disable MDD for RX and interrupt */
1371 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1372 reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1373 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1374 }
1375
1376 /**
1377 * ixgbe_enable_mdd_X550
1378 * @hw: pointer to hardware structure
1379 *
1380 * Enable malicious driver detection
1381 **/
1382 void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw)
1383 {
1384 u32 reg;
1385
1386 DEBUGFUNC("ixgbe_enable_mdd_X550");
1387
1388 /* Enable MDD for TX DMA and interrupt */
1389 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1390 reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1391 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1392
1393 /* Enable MDD for RX and interrupt */
1394 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1395 reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1396 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1397 }
1398
1399 /**
1400 * ixgbe_restore_mdd_vf_X550
1401 * @hw: pointer to hardware structure
1402 * @vf: vf index
1403 *
1404 * Restore VF that was disabled during malicious driver detection event
1405 **/
1406 void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf)
1407 {
1408 u32 idx, reg, num_qs, start_q, bitmask;
1409
1410 DEBUGFUNC("ixgbe_restore_mdd_vf_X550");
1411
1412 /* Map VF to queues */
1413 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1414 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1415 case IXGBE_MRQC_VMDQRT8TCEN:
1416 num_qs = 8; /* 16 VFs / pools */
1417 bitmask = 0x000000FF;
1418 break;
1419 case IXGBE_MRQC_VMDQRSS32EN:
1420 case IXGBE_MRQC_VMDQRT4TCEN:
1421 num_qs = 4; /* 32 VFs / pools */
1422 bitmask = 0x0000000F;
1423 break;
1424 default: /* 64 VFs / pools */
1425 num_qs = 2;
1426 bitmask = 0x00000003;
1427 break;
1428 }
1429 start_q = vf * num_qs;
1430
1431 /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
1432 idx = start_q / 32;
1433 reg = 0;
1434 reg |= (bitmask << (start_q % 32));
1435 IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg);
1436 IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg);
1437 }
1438
1439 /**
1440 * ixgbe_mdd_event_X550
1441 * @hw: pointer to hardware structure
1442 * @vf_bitmap: vf bitmap of malicious vfs
1443 *
1444 * Handle malicious driver detection event.
1445 **/
1446 void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap)
1447 {
1448 u32 wqbr;
1449 u32 i, j, reg, q, shift, vf, idx;
1450
1451 DEBUGFUNC("ixgbe_mdd_event_X550");
1452
1453 /* figure out pool size for mapping to vf's */
1454 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1455 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1456 case IXGBE_MRQC_VMDQRT8TCEN:
1457 shift = 3; /* 16 VFs / pools */
1458 break;
1459 case IXGBE_MRQC_VMDQRSS32EN:
1460 case IXGBE_MRQC_VMDQRT4TCEN:
1461 shift = 2; /* 32 VFs / pools */
1462 break;
1463 default:
1464 shift = 1; /* 64 VFs / pools */
1465 break;
1466 }
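/* shift converts a queue index into its owning pool/VF index below
 * (queues-per-pool of 8, 4 or 2 correspond to shifts of 3, 2 and 1).
 */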
1467
1468 /* Read WQBR_TX and WQBR_RX and check for malicious queues */
1469 for (i = 0; i < 4; i++) {
1470 wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i));
1471 wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
1472
1473 if (!wqbr)
1474 continue;
1475
1476 /* Get malicious queue */
1477 for (j = 0; j < 32 && wqbr; j++) {
1478
1479 if (!(wqbr & (1 << j)))
1480 continue;
1481
1482 /* Get queue from bitmask */
1483 q = j + (i * 32);
1484
1485 /* Map queue to vf */
1486 vf = (q >> shift);
1487
1488 /* Set vf bit in vf_bitmap */
1489 idx = vf / 32;
1490 vf_bitmap[idx] |= (1 << (vf % 32));
1491 wqbr &= ~(1 << j);
1492 }
1493 }
1494 }
1495
1496 /**
1497 * ixgbe_get_media_type_X550em - Get media type
1498 * @hw: pointer to hardware structure
1499 *
1500 * Returns the media type (fiber, copper, backplane)
1501 */
1502 enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
1503 {
1504 enum ixgbe_media_type media_type;
1505
1506 DEBUGFUNC("ixgbe_get_media_type_X550em");
1507
1508 /* Detect if there is a copper PHY attached. */
1509 switch (hw->device_id) {
1510 case IXGBE_DEV_ID_X550EM_X_KR:
1511 case IXGBE_DEV_ID_X550EM_X_KX4:
1512 case IXGBE_DEV_ID_X550EM_X_XFI:
1513 case IXGBE_DEV_ID_X550EM_A_KR:
1514 case IXGBE_DEV_ID_X550EM_A_KR_L:
1515 media_type = ixgbe_media_type_backplane;
1516 break;
1517 case IXGBE_DEV_ID_X550EM_X_SFP:
1518 case IXGBE_DEV_ID_X550EM_A_SFP:
1519 case IXGBE_DEV_ID_X550EM_A_SFP_N:
1520 case IXGBE_DEV_ID_X550EM_A_QSFP:
1521 case IXGBE_DEV_ID_X550EM_A_QSFP_N:
1522 media_type = ixgbe_media_type_fiber;
1523 break;
1524 case IXGBE_DEV_ID_X550EM_X_1G_T:
1525 case IXGBE_DEV_ID_X550EM_X_10G_T:
1526 case IXGBE_DEV_ID_X550EM_A_10G_T:
1527 media_type = ixgbe_media_type_copper;
1528 break;
1529 case IXGBE_DEV_ID_X550EM_A_SGMII:
1530 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
1531 media_type = ixgbe_media_type_backplane;
1532 hw->phy.type = ixgbe_phy_sgmii;
1533 break;
1534 case IXGBE_DEV_ID_X550EM_A_1G_T:
1535 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
1536 media_type = ixgbe_media_type_copper;
1537 break;
1538 default:
1539 media_type = ixgbe_media_type_unknown;
1540 break;
1541 }
1542 return media_type;
1543 }
1544
1545 /**
1546 * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported
1547 * @hw: pointer to hardware structure
1548 * @linear: TRUE if SFP module is linear
1549 */
1550 static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
1551 {
1552 DEBUGFUNC("ixgbe_supported_sfp_modules_X550em");
1553
1554 switch (hw->phy.sfp_type) {
1555 case ixgbe_sfp_type_not_present:
1556 return IXGBE_ERR_SFP_NOT_PRESENT;
1557 case ixgbe_sfp_type_da_cu_core0:
1558 case ixgbe_sfp_type_da_cu_core1:
1559 *linear = TRUE;
1560 break;
1561 case ixgbe_sfp_type_srlr_core0:
1562 case ixgbe_sfp_type_srlr_core1:
1563 case ixgbe_sfp_type_da_act_lmt_core0:
1564 case ixgbe_sfp_type_da_act_lmt_core1:
1565 case ixgbe_sfp_type_1g_sx_core0:
1566 case ixgbe_sfp_type_1g_sx_core1:
1567 case ixgbe_sfp_type_1g_lx_core0:
1568 case ixgbe_sfp_type_1g_lx_core1:
1569 *linear = FALSE;
1570 break;
1571 case ixgbe_sfp_type_unknown:
1572 case ixgbe_sfp_type_1g_cu_core0:
1573 case ixgbe_sfp_type_1g_cu_core1:
1574 default:
1575 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1576 }
1577
1578 return IXGBE_SUCCESS;
1579 }
1580
1581 /**
1582 * ixgbe_identify_sfp_module_X550em - Identifies SFP modules
1583 * @hw: pointer to hardware structure
1584 *
1585 * Searches for and identifies the SFP module and assigns appropriate PHY type.
1586 **/
1587 s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw)
1588 {
1589 s32 status;
1590 bool linear;
1591
1592 DEBUGFUNC("ixgbe_identify_sfp_module_X550em");
1593
1594 status = ixgbe_identify_module_generic(hw);
1595
1596 if (status != IXGBE_SUCCESS)
1597 return status;
1598
1599 /* Check if SFP module is supported */
1600 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1601
1602 return status;
1603 }
1604
1605 /**
1606 * ixgbe_setup_sfp_modules_X550em - Setup MAC link ops
1607 * @hw: pointer to hardware structure
1608 */
1609 s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
1610 {
1611 s32 status;
1612 bool linear;
1613
1614 DEBUGFUNC("ixgbe_setup_sfp_modules_X550em");
1615
1616 /* Check if SFP module is supported */
1617 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1618
1619 if (status != IXGBE_SUCCESS)
1620 return status;
1621
1622 ixgbe_init_mac_link_ops_X550em(hw);
1623 hw->phy.ops.reset = NULL;
1624
1625 return IXGBE_SUCCESS;
1626 }
1627
1628 /**
1629 * ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the
1630 * internal PHY
1631 * @hw: pointer to hardware structure
1632 **/
1633 static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
1634 {
1635 s32 status;
1636 u32 link_ctrl;
1637
1638 /* Restart auto-negotiation. */
1639 status = hw->mac.ops.read_iosf_sb_reg(hw,
1640 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1641 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl);
1642
1643 if (status) {
1644 DEBUGOUT("Auto-negotiation did not complete\n");
1645 return status;
1646 }
1647
1648 link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
1649 status = hw->mac.ops.write_iosf_sb_reg(hw,
1650 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1651 IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl);
1652
1653 if (hw->mac.type == ixgbe_mac_X550EM_a) {
1654 u32 flx_mask_st20;
1655
1656 /* Indicate to FW that AN restart has been asserted */
1657 status = hw->mac.ops.read_iosf_sb_reg(hw,
1658 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1659 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20);
1660
1661 if (status) {
1662 DEBUGOUT("Auto-negotiation did not complete\n");
1663 return status;
1664 }
1665
1666 flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART;
1667 status = hw->mac.ops.write_iosf_sb_reg(hw,
1668 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1669 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20);
1670 }
1671
1672 return status;
1673 }
1674
1675 /**
1676 * ixgbe_setup_sgmii - Set up link for sgmii
1677 * @hw: pointer to hardware structure
1678 * @speed: new link speed
1679 * @autoneg_wait: TRUE when waiting for completion is needed
1680 */
1681 static s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1682 bool autoneg_wait)
1683 {
1684 struct ixgbe_mac_info *mac = &hw->mac;
1685 u32 lval, sval, flx_val;
1686 s32 rc;
1687
1688 rc = mac->ops.read_iosf_sb_reg(hw,
1689 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1690 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1691 if (rc)
1692 return rc;
1693
1694 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1695 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1696 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1697 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1698 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1699 rc = mac->ops.write_iosf_sb_reg(hw,
1700 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1701 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1702 if (rc)
1703 return rc;
1704
1705 rc = mac->ops.read_iosf_sb_reg(hw,
1706 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1707 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1708 if (rc)
1709 return rc;
1710
1711 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1712 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1713 rc = mac->ops.write_iosf_sb_reg(hw,
1714 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1715 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1716 if (rc)
1717 return rc;
1718
1719 rc = mac->ops.read_iosf_sb_reg(hw,
1720 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1721 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1722 if (rc)
1723 return rc;
1724
1725 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1726 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
1727 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1728 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1729 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1730
1731 rc = mac->ops.write_iosf_sb_reg(hw,
1732 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1733 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1734 if (rc)
1735 return rc;
1736
1737 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1738 if (rc)
1739 return rc;
1740
1741 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1742 }
1743
1744 /**
1745 * ixgbe_setup_sgmii_fw - Set up link for internal PHY SGMII auto-negotiation
1746 * @hw: pointer to hardware structure
1747 * @speed: new link speed
1748 * @autoneg_wait: TRUE when waiting for completion is needed
1749 */
1750 static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1751 bool autoneg_wait)
1752 {
1753 struct ixgbe_mac_info *mac = &hw->mac;
1754 u32 lval, sval, flx_val;
1755 s32 rc;
1756
1757 rc = mac->ops.read_iosf_sb_reg(hw,
1758 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1759 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1760 if (rc)
1761 return rc;
1762
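/* Same LINK_CTRL_1 programming as ixgbe_setup_sgmii above, except the
 * forced 1G speed bit is cleared rather than set on this
 * firmware-managed PHY path.
 */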
1763 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1764 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1765 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1766 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1767 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1768 rc = mac->ops.write_iosf_sb_reg(hw,
1769 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1770 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1771 if (rc)
1772 return rc;
1773
1774 rc = mac->ops.read_iosf_sb_reg(hw,
1775 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1776 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1777 if (rc)
1778 return rc;
1779
1780 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1781 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1782 rc = mac->ops.write_iosf_sb_reg(hw,
1783 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1784 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1785 if (rc)
1786 return rc;
1787
1788 rc = mac->ops.write_iosf_sb_reg(hw,
1789 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1790 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1791 if (rc)
1792 return rc;
1793
1794 rc = mac->ops.read_iosf_sb_reg(hw,
1795 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1796 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1797 if (rc)
1798 return rc;
1799
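/* Unlike ixgbe_setup_sgmii, select the auto-negotiated speed (SPEED_AN)
 * in the PMD flex mask rather than forcing 1G.
 */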
1800 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1801 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
1802 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1803 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1804 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1805
1806 rc = mac->ops.write_iosf_sb_reg(hw,
1807 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1808 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1809 if (rc)
1810 return rc;
1811
1812 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1813
1814 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1815 }
1816
1817 /**
1818 * ixgbe_init_mac_link_ops_X550em - init mac link function pointers
1819 * @hw: pointer to hardware structure
1820 */
1821 void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
1822 {
1823 struct ixgbe_mac_info *mac = &hw->mac;
1824
1825 DEBUGFUNC("ixgbe_init_mac_link_ops_X550em");
1826
1827 switch (hw->mac.ops.get_media_type(hw)) {
1828 case ixgbe_media_type_fiber:
1829 /* CS4227 does not support autoneg, so disable the laser control
1830 * functions for SFP+ fiber
1831 */
1832 mac->ops.disable_tx_laser = NULL;
1833 mac->ops.enable_tx_laser = NULL;
1834 mac->ops.flap_tx_laser = NULL;
1835 mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
1836 mac->ops.set_rate_select_speed =
1837 ixgbe_set_soft_rate_select_speed;
1838
1839 if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) ||
1840 (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP))
1841 mac->ops.setup_mac_link =
1842 ixgbe_setup_mac_link_sfp_x550a;
1843 else
1844 mac->ops.setup_mac_link =
1845 ixgbe_setup_mac_link_sfp_x550em;
1846 break;
1847 case ixgbe_media_type_copper:
1848 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T)
1849 break;
1850 if (hw->mac.type == ixgbe_mac_X550EM_a) {
1851 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
1852 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
1853 mac->ops.setup_link = ixgbe_setup_sgmii_fw;
1854 mac->ops.check_link =
1855 ixgbe_check_mac_link_generic;
1856 } else {
1857 mac->ops.setup_link =
1858 ixgbe_setup_mac_link_t_X550em;
1859 }
1860 } else {
1861 mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
1862 mac->ops.check_link = ixgbe_check_link_t_X550em;
1863 }
1864 break;
1865 case ixgbe_media_type_backplane:
1866 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
1867 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
1868 mac->ops.setup_link = ixgbe_setup_sgmii;
1869 break;
1870 default:
1871 break;
1872 }
1873 }
1874
1875 /**
1876 * ixgbe_get_link_capabilities_x550em - Determines link capabilities
1877 * @hw: pointer to hardware structure
1878 * @speed: pointer to link speed
1879 * @autoneg: TRUE when autoneg or autotry is enabled
1880 */
1881 s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
1882 ixgbe_link_speed *speed,
1883 bool *autoneg)
1884 {
1885 DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
1886
1887
1888 if (hw->phy.type == ixgbe_phy_fw) {
1889 *autoneg = TRUE;
1890 *speed = hw->phy.speeds_supported;
1891 return 0;
1892 }
1893
1894 /* SFP */
1895 if (hw->phy.media_type == ixgbe_media_type_fiber) {
1896
1897 /* CS4227 SFP must not enable auto-negotiation */
1898 *autoneg = FALSE;
1899
1900 /* Check if 1G SFP module. */
1901 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
1902 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1
1903 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
1904 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
1905 *speed = IXGBE_LINK_SPEED_1GB_FULL;
1906 return IXGBE_SUCCESS;
1907 }
1908
1909 /* Link capabilities are based on SFP */
1910 if (hw->phy.multispeed_fiber)
1911 *speed = IXGBE_LINK_SPEED_10GB_FULL |
1912 IXGBE_LINK_SPEED_1GB_FULL;
1913 else
1914 *speed = IXGBE_LINK_SPEED_10GB_FULL;
1915 } else {
1916 switch (hw->phy.type) {
1917 case ixgbe_phy_ext_1g_t:
1918 case ixgbe_phy_sgmii:
1919 *speed = IXGBE_LINK_SPEED_1GB_FULL;
1920 break;
1921 case ixgbe_phy_x550em_kr:
1922 if (hw->mac.type == ixgbe_mac_X550EM_a) {
1923 /* check different backplane modes */
1924 if (hw->phy.nw_mng_if_sel &
1925 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
1926 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
1927 break;
1928 } else if (hw->device_id ==
1929 IXGBE_DEV_ID_X550EM_A_KR_L) {
1930 *speed = IXGBE_LINK_SPEED_1GB_FULL;
1931 break;
1932 }
1933 }
1934 /* fall through */
1935 default:
1936 *speed = IXGBE_LINK_SPEED_10GB_FULL |
1937 IXGBE_LINK_SPEED_1GB_FULL;
1938 break;
1939 }
1940 *autoneg = TRUE;
1941 }
1942
1943 return IXGBE_SUCCESS;
1944 }
1945
1946 /**
1947 * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
1948 * @hw: pointer to hardware structure
1949 * @lsc: pointer to boolean flag which indicates whether external Base T
1950 * PHY interrupt is lsc
1951 *
1952 * Determine whether the external Base T PHY interrupt cause is a high
1953 * temperature failure alarm or a link status change.
1954 *
1955 * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
1956 * failure alarm, else return PHY access status.
1957 */
1958 static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
1959 {
1960 u32 status;
1961 u16 reg;
1962
1963 *lsc = FALSE;
1964
1965 /* Vendor alarm triggered */
1966 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
1967 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1968 &reg);
1969
1970 if (status != IXGBE_SUCCESS ||
1971 !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
1972 return status;
1973
1974 /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */
1975 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
1976 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1977 &reg);
1978
1979 if (status != IXGBE_SUCCESS ||
1980 !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
1981 IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
1982 return status;
1983
1984 /* Global alarm triggered */
1985 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
1986 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1987 &reg);
1988
1989 if (status != IXGBE_SUCCESS)
1990 return status;
1991
1992 /* If high temperature failure, then return over temp error and exit */
1993 if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
1994 /* power down the PHY in case the PHY FW didn't already */
1995 ixgbe_set_copper_phy_power(hw, FALSE);
1996 return IXGBE_ERR_OVERTEMP;
1997 } else if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) {
1998 /* device fault alarm triggered */
1999 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG,
2000 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2001 &reg);
2002
2003 if (status != IXGBE_SUCCESS)
2004 return status;
2005
2006 /* if device fault was due to high temp alarm handle and exit */
2007 if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) {
2008 /* power down the PHY in case the PHY FW didn't */
2009 ixgbe_set_copper_phy_power(hw, FALSE);
2010 return IXGBE_ERR_OVERTEMP;
2011 }
2012 }
2013
2014 /* Vendor alarm 2 triggered */
2015 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
2016 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2017
2018 if (status != IXGBE_SUCCESS ||
2019 !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
2020 return status;
2021
2022 /* link connect/disconnect event occurred */
2023 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
2024 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2025
2026 if (status != IXGBE_SUCCESS)
2027 return status;
2028
2029 /* Indicate LSC */
2030 if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC)
2031 *lsc = TRUE;
2032
2033 return IXGBE_SUCCESS;
2034 }
2035
2036 /**
2037 * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts
2038 * @hw: pointer to hardware structure
2039 *
2040 * Enable link status change and temperature failure alarm for the external
2041 * Base T PHY
2042 *
2043 * Returns PHY access status
2044 */
2045 static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
2046 {
2047 u32 status;
2048 u16 reg;
2049 bool lsc;
2050
2051 /* Clear interrupt flags */
2052 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
2053
2054 /* Enable link status change alarm */
2055
2056 /* Enable the LASI interrupts on X552 devices to receive notifications
2057 * of the link configuration of the external PHY, which is needed to
2058 * configure the internal iXFI link accordingly, since iXFI does not
2059 * support auto-negotiation. This is not required for X553 devices,
2060 * whose KR-based internal link to the external PHY performs
2061 * auto-negotiation. Hence the check here avoids enabling LASI
2062 * interrupts for X553 devices.
2063 */
2064 if (hw->mac.type != ixgbe_mac_X550EM_a) {
2065 status = hw->phy.ops.read_reg(hw,
2066 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2067 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2068
2069 if (status != IXGBE_SUCCESS)
2070 return status;
2071
2072 reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
2073
2074 status = hw->phy.ops.write_reg(hw,
2075 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2076 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
2077
2078 if (status != IXGBE_SUCCESS)
2079 return status;
2080 }
2081
2082 /* Enable high temperature failure and global fault alarms */
2083 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2084 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2085 &reg);
2086
2087 if (status != IXGBE_SUCCESS)
2088 return status;
2089
2090 reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN |
2091 IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN);
2092
2093 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2094 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2095 reg);
2096
2097 if (status != IXGBE_SUCCESS)
2098 return status;
2099
2100 /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */
2101 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2102 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2103 &reg);
2104
2105 if (status != IXGBE_SUCCESS)
2106 return status;
2107
2108 reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2109 IXGBE_MDIO_GLOBAL_ALARM_1_INT);
2110
2111 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2112 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2113 reg);
2114
2115 if (status != IXGBE_SUCCESS)
2116 return status;
2117
2118 /* Enable chip-wide vendor alarm */
2119 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2120 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2121 &reg);
2122
2123 if (status != IXGBE_SUCCESS)
2124 return status;
2125
2126 reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
2127
2128 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2129 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2130 reg);
2131
2132 return status;
2133 }
2134
2135 /**
2136 * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed.
2137 * @hw: pointer to hardware structure
2138 * @speed: link speed
2139 *
2140 * Configures the integrated KR PHY.
2141 **/
2142 static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
2143 ixgbe_link_speed speed)
2144 {
2145 s32 status;
2146 u32 reg_val;
2147
2148 status = hw->mac.ops.read_iosf_sb_reg(hw,
2149 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2150 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2151 if (status)
2152 return status;
2153
2154 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
2155 reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
2156 IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
2157
2158 /* Advertise 10G support. */
2159 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
2160 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
2161
2162 /* Advertise 1G support. */
2163 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
2164 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
2165
2166 status = hw->mac.ops.write_iosf_sb_reg(hw,
2167 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2168 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2169
2170 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2171 /* Set lane mode to KR auto negotiation */
2172 status = hw->mac.ops.read_iosf_sb_reg(hw,
2173 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2174 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2175
2176 if (status)
2177 return status;
2178
2179 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2180 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
2181 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2182 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2183 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2184
2185 status = hw->mac.ops.write_iosf_sb_reg(hw,
2186 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2187 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2188 }
2189
2190 return ixgbe_restart_an_internal_phy_x550em(hw);
2191 }
2192
2193 /**
2194 * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
2195 * @hw: pointer to hardware structure
2196 */
2197 static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
2198 {
2199 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2200 s32 rc;
2201
2202 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
2203 return IXGBE_SUCCESS;
2204
2205 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store);
2206 if (rc)
2207 return rc;
2208 memset(store, 0, sizeof(store));
2209
2210 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store);
2211 if (rc)
2212 return rc;
2213
2214 return ixgbe_setup_fw_link(hw);
2215 }
2216
2217 /**
2218 * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
2219 * @hw: pointer to hardware structure
2220 */
2221 static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
2222 {
2223 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2224 s32 rc;
2225
2226 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
2227 if (rc)
2228 return rc;
2229
2230 if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
2231 ixgbe_shutdown_fw_phy(hw);
2232 return IXGBE_ERR_OVERTEMP;
2233 }
2234 return IXGBE_SUCCESS;
2235 }
2236
2237 /**
2238 * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
2239 * @hw: pointer to hardware structure
2240 *
2241 * Read NW_MNG_IF_SEL register and save field values, and check for valid field
2242 * values.
2243 **/
2244 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
2245 {
2246 /* Save NW management interface connected on board. This is used
2247 * to determine internal PHY mode.
2248 */
2249 hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
2250
2251 /* If X552 (X550EM_a) and MDIO is connected to external PHY, then set
2252 * PHY address. This register field has only been used for X552.
2253 */
2254 if (hw->mac.type == ixgbe_mac_X550EM_a &&
2255 hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
2256 hw->phy.addr = (hw->phy.nw_mng_if_sel &
2257 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
2258 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
2259 }
2260
2261 return IXGBE_SUCCESS;
2262 }
2263
2264 /**
2265 * ixgbe_init_phy_ops_X550em - PHY/SFP specific init
2266 * @hw: pointer to hardware structure
2267 *
2268 * Initialize any function pointers that were not able to be
2269 * set during init_shared_code because the PHY/SFP type was
2270 * not known. Perform the SFP init if necessary.
2271 */
2272 s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
2273 {
2274 struct ixgbe_phy_info *phy = &hw->phy;
2275 s32 ret_val;
2276
2277 DEBUGFUNC("ixgbe_init_phy_ops_X550em");
2278
2279 hw->mac.ops.set_lan_id(hw);
2280 ixgbe_read_mng_if_sel_x550em(hw);
2281
2282 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
2283 phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2284 ixgbe_setup_mux_ctl(hw);
2285 phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em;
2286 }
2287
2288 switch (hw->device_id) {
2289 case IXGBE_DEV_ID_X550EM_A_1G_T:
2290 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2291 phy->ops.read_reg_mdi = NULL;
2292 phy->ops.write_reg_mdi = NULL;
2293 hw->phy.ops.read_reg = NULL;
2294 hw->phy.ops.write_reg = NULL;
2295 phy->ops.check_overtemp = ixgbe_check_overtemp_fw;
2296 if (hw->bus.lan_id)
2297 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2298 else
2299 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2300
2301 break;
2302 case IXGBE_DEV_ID_X550EM_A_10G_T:
2303 case IXGBE_DEV_ID_X550EM_A_SFP:
2304 hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
2305 hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
2306 if (hw->bus.lan_id)
2307 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2308 else
2309 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2310 break;
2311 case IXGBE_DEV_ID_X550EM_X_SFP:
2312 /* set up for CS4227 usage */
2313 hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2314 break;
2315 case IXGBE_DEV_ID_X550EM_X_1G_T:
2316 phy->ops.read_reg_mdi = NULL;
2317 phy->ops.write_reg_mdi = NULL;
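/* FALLTHROUGH */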
2318 default:
2319 break;
2320 }
2321
2322 /* Identify the PHY or SFP module */
2323 ret_val = phy->ops.identify(hw);
2324 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2325 ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
2326 return ret_val;
2327
2328 /* Setup function pointers based on detected hardware */
2329 ixgbe_init_mac_link_ops_X550em(hw);
2330 if (phy->sfp_type != ixgbe_sfp_type_unknown)
2331 phy->ops.reset = NULL;
2332
2333 /* Set functions pointers based on phy type */
2334 switch (hw->phy.type) {
2335 case ixgbe_phy_x550em_kx4:
2336 phy->ops.setup_link = NULL;
2337 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2338 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2339 break;
2340 case ixgbe_phy_x550em_kr:
2341 phy->ops.setup_link = ixgbe_setup_kr_x550em;
2342 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2343 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2344 break;
2345 case ixgbe_phy_ext_1g_t:
2346 /* link is managed by FW */
2347 phy->ops.setup_link = NULL;
2348 phy->ops.reset = NULL;
2349 break;
2350 case ixgbe_phy_x550em_xfi:
2351 /* link is managed by HW */
2352 phy->ops.setup_link = NULL;
2353 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2354 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2355 break;
2356 case ixgbe_phy_x550em_ext_t:
2357 /* If internal link mode is XFI, then setup iXFI internal link,
2358 * else setup KR now.
2359 */
2360 phy->ops.setup_internal_link =
2361 ixgbe_setup_internal_phy_t_x550em;
2362
2363 /* setup SW LPLU only for first revision of X550EM_x */
2364 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
2365 !(IXGBE_FUSES0_REV_MASK &
2366 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
2367 phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
2368
2369 phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
2370 phy->ops.reset = ixgbe_reset_phy_t_X550em;
2371 break;
2372 case ixgbe_phy_sgmii:
2373 phy->ops.setup_link = NULL;
2374 break;
2375 case ixgbe_phy_fw:
2376 phy->ops.setup_link = ixgbe_setup_fw_link;
2377 phy->ops.reset = ixgbe_reset_phy_fw;
2378 break;
2379 default:
2380 break;
2381 }
2382 return ret_val;
2383 }
2384
2385 /**
2386 * ixgbe_set_mdio_speed - Set MDIO clock speed
2387 * @hw: pointer to hardware structure
2388 */
2389 static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
2390 {
2391 u32 hlreg0;
2392
2393 switch (hw->device_id) {
2394 case IXGBE_DEV_ID_X550EM_X_10G_T:
2395 case IXGBE_DEV_ID_X550EM_A_SGMII:
2396 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
2397 case IXGBE_DEV_ID_X550EM_A_10G_T:
2398 case IXGBE_DEV_ID_X550EM_A_SFP:
2399 case IXGBE_DEV_ID_X550EM_A_QSFP:
2400 /* Config MDIO clock speed before the first MDIO PHY access */
2401 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2402 hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
2403 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2404 break;
2405 case IXGBE_DEV_ID_X550EM_A_1G_T:
2406 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2407 /* Select fast MDIO clock speed for these devices */
2408 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2409 hlreg0 |= IXGBE_HLREG0_MDCSPD;
2410 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2411 break;
2412 default:
2413 break;
2414 }
2415 }
2416
2417 /**
2418 * ixgbe_reset_hw_X550em - Perform hardware reset
2419 * @hw: pointer to hardware structure
2420 *
2421 * Resets the hardware by resetting the transmit and receive units, masks
2422 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
2423 * reset.
2424 */
2425 s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
2426 {
2427 ixgbe_link_speed link_speed;
2428 s32 status;
2429 u32 ctrl = 0;
2430 u32 i;
2431 bool link_up = FALSE;
2432 u32 swfw_mask = hw->phy.phy_semaphore_mask;
2433
2434 DEBUGFUNC("ixgbe_reset_hw_X550em");
2435
2436 /* Call adapter stop to disable Tx/Rx and clear interrupts */
2437 status = hw->mac.ops.stop_adapter(hw);
2438 if (status != IXGBE_SUCCESS) {
2439 DEBUGOUT1("Failed to stop adapter, STATUS = %d\n", status);
2440 return status;
2441 }
2442 /* flush pending Tx transactions */
2443 ixgbe_clear_tx_pending(hw);
2444
2445 ixgbe_set_mdio_speed(hw);
2446
2447 /* PHY ops must be identified and initialized prior to reset */
2448 status = hw->phy.ops.init(hw);
2449
2450 if (status)
2451 DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
2452 status);
2453
2454 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2455 status == IXGBE_ERR_PHY_ADDR_INVALID) {
2456 DEBUGOUT("Returning from reset HW due to PHY init failure\n");
2457 return status;
2458 }
2459
2460 /* start the external PHY */
2461 if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
2462 status = ixgbe_init_ext_t_x550em(hw);
2463 if (status) {
2464 DEBUGOUT1("Failed to start the external PHY, STATUS = %d\n",
2465 status);
2466 return status;
2467 }
2468 }
2469
2470 /* Setup SFP module if there is one present. */
2471 if (hw->phy.sfp_setup_needed) {
2472 status = hw->mac.ops.setup_sfp(hw);
2473 hw->phy.sfp_setup_needed = FALSE;
2474 }
2475
2476 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
2477 return status;
2478
2479 /* Reset PHY */
2480 if (!hw->phy.reset_disable && hw->phy.ops.reset) {
2481 if (hw->phy.ops.reset(hw) == IXGBE_ERR_OVERTEMP)
2482 return IXGBE_ERR_OVERTEMP;
2483 }
2484
2485 mac_reset_top:
2486 /* Issue global reset to the MAC. Needs to be SW reset if link is up.
2487 * If link reset is used when link is up, it might reset the PHY when
2488 * mng is using it. If link is down or the flag to force full link
2489 * reset is set, then perform link reset.
2490 */
2491 ctrl = IXGBE_CTRL_LNK_RST;
2492 if (!hw->force_full_reset) {
2493 hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
2494 if (link_up)
2495 ctrl = IXGBE_CTRL_RST;
2496 }
2497
2498 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
2499 if (status != IXGBE_SUCCESS) {
2500 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
2501 "semaphore failed with %d", status);
2502 return IXGBE_ERR_SWFW_SYNC;
2503 }
2504 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
2505 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
2506 IXGBE_WRITE_FLUSH(hw);
2507 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
2508
2509 /* Poll for reset bit to self-clear meaning reset is complete */
2510 for (i = 0; i < 10; i++) {
2511 usec_delay(1);
2512 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
2513 if (!(ctrl & IXGBE_CTRL_RST_MASK))
2514 break;
2515 }
2516
2517 if (ctrl & IXGBE_CTRL_RST_MASK) {
2518 status = IXGBE_ERR_RESET_FAILED;
2519 DEBUGOUT("Reset polling failed to complete.\n");
2520 }
2521
2522 msec_delay(50);
2523
2524 /* Double resets are required for recovery from certain error
2525 * conditions. Between resets, it is necessary to stall to
2526 * allow time for any pending HW events to complete.
2527 */
2528 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
2529 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2530 goto mac_reset_top;
2531 }
2532
2533 /* Store the permanent mac address */
2534 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
2535
2536 /* Store MAC address from RAR0, clear receive address registers, and
2537 * clear the multicast table. Also reset num_rar_entries to 128,
2538 * since we modify this value when programming the SAN MAC address.
2539 */
2540 hw->mac.num_rar_entries = 128;
2541 hw->mac.ops.init_rx_addrs(hw);
2542
2543 ixgbe_set_mdio_speed(hw);
2544
2545 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
2546 ixgbe_setup_mux_ctl(hw);
2547
2548 if (status != IXGBE_SUCCESS)
2549 DEBUGOUT1("Reset HW failed, STATUS = %d\n", status);
2550
2551 return status;
2552 }
2553
2554 /**
2555 * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
2556 * @hw: pointer to hardware structure
2557 */
2558 s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
2559 {
2560 u32 status;
2561 u16 reg;
2562
2563 status = hw->phy.ops.read_reg(hw,
2564 IXGBE_MDIO_TX_VENDOR_ALARMS_3,
2565 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
2566 &reg);
2567
2568 if (status != IXGBE_SUCCESS)
2569 return status;
2570
2571 /* If PHY FW reset completed bit is set then this is the first
2572 * SW instance after a power on so the PHY FW must be un-stalled.
2573 */
2574 if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
2575 status = hw->phy.ops.read_reg(hw,
2576 IXGBE_MDIO_GLOBAL_RES_PR_10,
2577 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2578 &reg);
2579
2580 if (status != IXGBE_SUCCESS)
2581 return status;
2582
2583 reg &= ~IXGBE_MDIO_POWER_UP_STALL;
2584
2585 status = hw->phy.ops.write_reg(hw,
2586 IXGBE_MDIO_GLOBAL_RES_PR_10,
2587 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2588 reg);
2589
2590 if (status != IXGBE_SUCCESS)
2591 return status;
2592 }
2593
2594 return status;
2595 }
2596
2597 /**
2598 * ixgbe_setup_kr_x550em - Configure the KR PHY.
2599 * @hw: pointer to hardware structure
2600 **/
2601 s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
2602 {
2603 /* leave link alone for 2.5G */
2604 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
2605 return IXGBE_SUCCESS;
2606
2607 if (ixgbe_check_reset_blocked(hw))
2608 return 0;
2609
2610 return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
2611 }
2612
2613 /**
2614 * ixgbe_setup_mac_link_sfp_x550em - Set up the internal/external PHY for SFP
2615 * @hw: pointer to hardware structure
2616 * @speed: new link speed
2617 * @autoneg_wait_to_complete: unused
2618 *
2619 * Configure the external PHY and the integrated KR PHY for SFP support.
2620 **/
2621 s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
2622 ixgbe_link_speed speed,
2623 bool autoneg_wait_to_complete)
2624 {
2625 s32 ret_val;
2626 u16 reg_slice, reg_val;
2627 bool setup_linear = FALSE;
2628 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2629
2630 /* Check if SFP module is supported and linear */
2631 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2632
2633 /* If no SFP module is present, return success: there is no reason to
2634 * configure the CS4227, and an SFP-not-present error is not treated as
2635 * a failure in the setup MAC link flow.
2636 */
2637 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2638 return IXGBE_SUCCESS;
2639
2640 if (ret_val != IXGBE_SUCCESS)
2641 return ret_val;
2642
2643 /* Configure internal PHY for KR/KX. */
2644 ixgbe_setup_kr_speed_x550em(hw, speed);
2645
2646 /* Configure CS4227 LINE side to proper mode. */
2647 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
2648 (hw->bus.lan_id << 12);
2649 if (setup_linear)
2650 reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2651 else
2652 reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2653 ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
2654 reg_val);
2655 return ret_val;
2656 }
2657
2658 /**
2659 * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode
2660 * @hw: pointer to hardware structure
2661 * @speed: the link speed to force
2662 *
2663 * Configures the integrated PHY for native SFI mode. Used to connect the
2664 * internal PHY directly to an SFP cage, without autonegotiation.
2665 **/
2666 static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
2667 {
2668 struct ixgbe_mac_info *mac = &hw->mac;
2669 s32 status;
2670 u32 reg_val;
2671
2672 /* Disable all AN and force the serial (SFI) link speed selected below. */
2673 status = mac->ops.read_iosf_sb_reg(hw,
2674 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2675 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2676 if (status != IXGBE_SUCCESS)
2677 return status;
2678
2679 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2680 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2681 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2682 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2683
2684 /* Select forced link speed for internal PHY. */
2685 switch (*speed) {
2686 case IXGBE_LINK_SPEED_10GB_FULL:
2687 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G;
2688 break;
2689 case IXGBE_LINK_SPEED_1GB_FULL:
2690 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
2691 break;
2692 default:
2693 /* Other link speeds are not supported by internal PHY. */
2694 return IXGBE_ERR_LINK_SETUP;
2695 }
2696
2697 status = mac->ops.write_iosf_sb_reg(hw,
2698 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2699 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2700
2701 /* Toggle port SW reset by AN reset. */
2702 status = ixgbe_restart_an_internal_phy_x550em(hw);
2703
2704 return status;
2705 }
2706
2707 /**
2708 * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
2709 * @hw: pointer to hardware structure
2710 * @speed: new link speed
2711 * @autoneg_wait_to_complete: unused
2712 *
2713 * Configure the integrated PHY for SFP support.
2714 **/
2715 s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
2716 ixgbe_link_speed speed,
2717 bool autoneg_wait_to_complete)
2718 {
2719 s32 ret_val;
2720 u16 reg_phy_ext;
2721 bool setup_linear = FALSE;
2722 u32 reg_slice, reg_phy_int, slice_offset;
2723
2724 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2725
2726 /* Check if SFP module is supported and linear */
2727 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2728
2729 /* If no SFP module is present, return success: an SFP-not-present
2730 * error is not treated as a failure in the setup MAC link flow.
2731 */
2732 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2733 return IXGBE_SUCCESS;
2734
2735 if (ret_val != IXGBE_SUCCESS)
2736 return ret_val;
2737
2738 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
2739 /* Configure internal PHY for native SFI based on module type */
2740 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
2741 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2742 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
2743
2744 if (ret_val != IXGBE_SUCCESS)
2745 return ret_val;
2746
2747 reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA;
2748 if (!setup_linear)
2749 reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR;
2750
2751 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
2752 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2753 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
2754
2755 if (ret_val != IXGBE_SUCCESS)
2756 return ret_val;
2757
2758 /* Setup SFI internal link. */
2759 ret_val = ixgbe_setup_sfi_x550a(hw, &speed);
2760 } else {
2761 /* Configure internal PHY for KR/KX. */
2762 ixgbe_setup_kr_speed_x550em(hw, speed);
2763
2764 if (hw->phy.addr == 0x0 || hw->phy.addr == 0xFFFF) {
2765 /* Find Address */
2766 DEBUGOUT("Invalid NW_MNG_IF_SEL.MDIO_PHY_ADD value\n");
2767 return IXGBE_ERR_PHY_ADDR_INVALID;
2768 }
2769
2770 /* Get external PHY SKU id */
2771 ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
2772 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2773
2774 if (ret_val != IXGBE_SUCCESS)
2775 return ret_val;
2776
2777 /* When configuring quad port CS4223, the MAC instance is part
2778 * of the slice offset.
2779 */
2780 if (reg_phy_ext == IXGBE_CS4223_SKU_ID)
2781 slice_offset = (hw->bus.lan_id +
2782 (hw->bus.instance_id << 1)) << 12;
2783 else
2784 slice_offset = hw->bus.lan_id << 12;
2785
2786 /* Configure CS4227/CS4223 LINE side to proper mode. */
2787 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
2788
2789 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2790 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2791
2792 if (ret_val != IXGBE_SUCCESS)
2793 return ret_val;
2794
2795 reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) |
2796 (IXGBE_CS4227_EDC_MODE_SR << 1));
2797
2798 if (setup_linear)
2799 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2800 else
2801 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2802 ret_val = hw->phy.ops.write_reg(hw, reg_slice,
2803 IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
2804
2805 /* Flush previous write with a read */
2806 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2807 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2808 }
2809 return ret_val;
2810 }
2811
2812 /**
2813 * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration
2814 * @hw: pointer to hardware structure
2815 *
2816 * iXFI configuration needed for ixgbe_mac_X550EM_x devices.
2817 **/
2818 static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
2819 {
2820 struct ixgbe_mac_info *mac = &hw->mac;
2821 s32 status;
2822 u32 reg_val;
2823
2824 /* Disable training protocol FSM. */
2825 status = mac->ops.read_iosf_sb_reg(hw,
2826 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
2827 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2828 if (status != IXGBE_SUCCESS)
2829 return status;
2830 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
2831 status = mac->ops.write_iosf_sb_reg(hw,
2832 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
2833 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2834 if (status != IXGBE_SUCCESS)
2835 return status;
2836
2837 /* Disable Flex from training TXFFE. */
2838 status = mac->ops.read_iosf_sb_reg(hw,
2839 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
2840 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2841 if (status != IXGBE_SUCCESS)
2842 return status;
2843 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
2844 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
2845 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
2846 status = mac->ops.write_iosf_sb_reg(hw,
2847 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
2848 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2849 if (status != IXGBE_SUCCESS)
2850 return status;
2851 status = mac->ops.read_iosf_sb_reg(hw,
2852 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
2853 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2854 if (status != IXGBE_SUCCESS)
2855 return status;
2856 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
2857 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
2858 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
2859 status = mac->ops.write_iosf_sb_reg(hw,
2860 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
2861 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2862 if (status != IXGBE_SUCCESS)
2863 return status;
2864
2865 /* Enable override for coefficients. */
2866 status = mac->ops.read_iosf_sb_reg(hw,
2867 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
2868 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2869 if (status != IXGBE_SUCCESS)
2870 return status;
2871 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
2872 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
2873 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
2874 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
2875 status = mac->ops.write_iosf_sb_reg(hw,
2876 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
2877 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2878 return status;
2879 }
2880
2881 /**
2882 * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
2883 * @hw: pointer to hardware structure
2884 * @speed: the link speed to force
2885 *
2886 * Configures the integrated KR PHY to use iXFI mode. Used to connect an
2887 * internal and external PHY at a specific speed, without autonegotiation.
2888 **/
2889 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
2890 {
2891 struct ixgbe_mac_info *mac = &hw->mac;
2892 s32 status;
2893 u32 reg_val;
2894
2895 /* iXFI is only supported with X552 */
2896 if (mac->type != ixgbe_mac_X550EM_x)
2897 return IXGBE_ERR_LINK_SETUP;
2898
2899 /* Disable AN and force speed to 10G Serial. */
2900 status = mac->ops.read_iosf_sb_reg(hw,
2901 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2902 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2903 if (status != IXGBE_SUCCESS)
2904 return status;
2905
2906 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
2907 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
2908
2909 /* Select forced link speed for internal PHY. */
2910 switch (*speed) {
2911 case IXGBE_LINK_SPEED_10GB_FULL:
2912 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
2913 break;
2914 case IXGBE_LINK_SPEED_1GB_FULL:
2915 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
2916 break;
2917 default:
2918 /* Other link speeds are not supported by internal KR PHY. */
2919 return IXGBE_ERR_LINK_SETUP;
2920 }
2921
2922 status = mac->ops.write_iosf_sb_reg(hw,
2923 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2924 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2925 if (status != IXGBE_SUCCESS)
2926 return status;
2927
2928 /* Additional configuration needed for x550em_x */
2929 if (hw->mac.type == ixgbe_mac_X550EM_x) {
2930 status = ixgbe_setup_ixfi_x550em_x(hw);
2931 if (status != IXGBE_SUCCESS)
2932 return status;
2933 }
2934
2935 /* Toggle port SW reset by AN reset. */
2936 status = ixgbe_restart_an_internal_phy_x550em(hw);
2937
2938 return status;
2939 }
2940
2941 /**
2942 * ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status
2943 * @hw: address of hardware structure
2944 * @link_up: address of boolean to indicate link status
2945 *
2946 * Returns error code if unable to get link status.
2947 */
2948 static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
2949 {
2950 u32 ret;
2951 u16 autoneg_status;
2952
2953 *link_up = FALSE;
2954
2955 /* read this twice back to back to indicate current status */
2956 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
2957 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2958 &autoneg_status);
2959 if (ret != IXGBE_SUCCESS)
2960 return ret;
2961
2962 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
2963 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2964 &autoneg_status);
2965 if (ret != IXGBE_SUCCESS)
2966 return ret;
2967
2968 *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
2969
2970 return IXGBE_SUCCESS;
2971 }
2972
2973 /**
2974 * ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
2975 * @hw: pointer to hardware structure
2976 *
2977 * Configures the link between the integrated KR PHY and the external X557 PHY.
2978 * The driver will call this function when it gets a link status change
2979 * interrupt from the X557 PHY. This function configures the link speed
2980 * between the PHYs to match the link speed of the BASE-T link.
2981 *
2982 * A return of a non-zero value indicates an error, and the base driver should
2983 * not report link up.
2984 */
2985 s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
2986 {
2987 ixgbe_link_speed force_speed;
2988 bool link_up;
2989 u32 status;
2990 u16 speed;
2991
2992 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
2993 return IXGBE_ERR_CONFIG;
2994
2995 if (hw->mac.type == ixgbe_mac_X550EM_x &&
2996 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
2997 /* If link is down, there is no setup necessary so return */
2998 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
2999 if (status != IXGBE_SUCCESS)
3000 return status;
3001
3002 if (!link_up)
3003 return IXGBE_SUCCESS;
3004
3005 status = hw->phy.ops.read_reg(hw,
3006 IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3007 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3008 &speed);
3009 if (status != IXGBE_SUCCESS)
3010 return status;
3011
3012 /* If link is still down - no setup is required so return */
3013 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3014 if (status != IXGBE_SUCCESS)
3015 return status;
3016 if (!link_up)
3017 return IXGBE_SUCCESS;
3018
3019 /* clear everything but the speed and duplex bits */
3020 speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
3021
3022 switch (speed) {
3023 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
3024 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
3025 break;
3026 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
3027 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
3028 break;
3029 default:
3030 /* Internal PHY does not support anything else */
3031 return IXGBE_ERR_INVALID_LINK_SETTINGS;
3032 }
3033
3034 return ixgbe_setup_ixfi_x550em(hw, &force_speed);
3035 } else {
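/* Internal link is KR in this configuration: advertise both 1G and
 * 10G and let auto-negotiation select the speed.
 */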
3036 speed = IXGBE_LINK_SPEED_10GB_FULL |
3037 IXGBE_LINK_SPEED_1GB_FULL;
3038 return ixgbe_setup_kr_speed_x550em(hw, speed);
3039 }
3040 }
3041
3042 /**
3043 * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback.
3044 * @hw: pointer to hardware structure
3045 *
3046 * Configures the integrated KR PHY to use internal loopback mode.
3047 **/
3048 s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
3049 {
3050 s32 status;
3051 u32 reg_val;
3052
3053 /* Disable AN and force speed to 10G Serial. */
3054 status = hw->mac.ops.read_iosf_sb_reg(hw,
3055 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3056 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3057 if (status != IXGBE_SUCCESS)
3058 return status;
3059 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
3060 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
3061 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
3062 status = hw->mac.ops.write_iosf_sb_reg(hw,
3063 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3064 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3065 if (status != IXGBE_SUCCESS)
3066 return status;
3067
3068 /* Set near-end loopback clocks. */
3069 status = hw->mac.ops.read_iosf_sb_reg(hw,
3070 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3071 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3072 if (status != IXGBE_SUCCESS)
3073 return status;
3074 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
3075 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
3076 status = hw->mac.ops.write_iosf_sb_reg(hw,
3077 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3078 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3079 if (status != IXGBE_SUCCESS)
3080 return status;
3081
3082 /* Set loopback enable. */
3083 status = hw->mac.ops.read_iosf_sb_reg(hw,
3084 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3085 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3086 if (status != IXGBE_SUCCESS)
3087 return status;
3088 reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
3089 status = hw->mac.ops.write_iosf_sb_reg(hw,
3090 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3091 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3092 if (status != IXGBE_SUCCESS)
3093 return status;
3094
3095 /* Training bypass. */
3096 status = hw->mac.ops.read_iosf_sb_reg(hw,
3097 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3098 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3099 if (status != IXGBE_SUCCESS)
3100 return status;
3101 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
3102 status = hw->mac.ops.write_iosf_sb_reg(hw,
3103 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3104 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3105
3106 return status;
3107 }
3108
3109 /**
3110 * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command
3111 * (acquires and releases the SW/FW semaphore internally).
3112 * @hw: pointer to hardware structure
3113 * @offset: offset of word in the EEPROM to read
3114 * @data: word read from the EEPROM
3115 *
3116 * Reads a 16 bit word from the EEPROM using the hostif.
3117 **/
3118 s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
3119 {
3120 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3121 struct ixgbe_hic_read_shadow_ram buffer;
3122 s32 status;
3123
3124 DEBUGFUNC("ixgbe_read_ee_hostif_X550");
3125 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3126 buffer.hdr.req.buf_lenh = 0;
3127 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3128 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3129
3130 /* convert offset from words to bytes */
3131 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3132 /* one word */
3133 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3134 buffer.pad2 = 0;
3135 buffer.pad3 = 0;
3136
3137 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3138 if (status)
3139 return status;
3140
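/* Issue the read while holding the semaphore taken above, then pull the
 * word out of the FLEX_MNG response area.
 */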
3141 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3142 IXGBE_HI_COMMAND_TIMEOUT);
3143 if (!status) {
3144 *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
3145 FW_NVM_DATA_OFFSET);
3146 }
3147
3148 hw->mac.ops.release_swfw_sync(hw, mask);
3149 return status;
3150 }
3151
3152 /**
3153 * ixgbe_read_ee_hostif_buffer_X550 - Read EEPROM word(s) using hostif
3154 * @hw: pointer to hardware structure
3155 * @offset: offset of word in the EEPROM to read
3156 * @words: number of words
3157 * @data: word(s) read from the EEPROM
3158 *
3159 * Reads one or more 16 bit words from the EEPROM using the hostif.
3160 **/
3161 s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3162 u16 offset, u16 words, u16 *data)
3163 {
3164 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3165 struct ixgbe_hic_read_shadow_ram buffer;
3166 u32 current_word = 0;
3167 u16 words_to_read;
3168 s32 status;
3169 u32 i;
3170
3171 DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550");
3172
3173 /* Take semaphore for the entire operation. */
3174 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3175 if (status) {
3176 DEBUGOUT("EEPROM read buffer - semaphore failed\n");
3177 return status;
3178 }
3179
3180 while (words) {
3181 if (words > FW_MAX_READ_BUFFER_SIZE / 2)
3182 words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
3183 else
3184 words_to_read = words;
3185
3186 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3187 buffer.hdr.req.buf_lenh = 0;
3188 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3189 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3190
3191 /* convert offset from words to bytes */
3192 buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2);
3193 buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2);
3194 buffer.pad2 = 0;
3195 buffer.pad3 = 0;
3196
3197 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3198 IXGBE_HI_COMMAND_TIMEOUT);
3199
3200 if (status) {
3201 DEBUGOUT("Host interface command failed\n");
3202 goto out;
3203 }
3204
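/* Each 32-bit FLEX_MNG register holds two 16-bit EEPROM words: unpack
 * the low half first, then the high half if more words remain.
 */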
3205 for (i = 0; i < words_to_read; i++) {
3206 u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
3207 2 * i;
3208 u32 value = IXGBE_READ_REG(hw, reg);
3209
3210 data[current_word] = (u16)(value & 0xffff);
3211 current_word++;
3212 i++;
3213 if (i < words_to_read) {
3214 value >>= 16;
3215 data[current_word] = (u16)(value & 0xffff);
3216 current_word++;
3217 }
3218 }
3219 words -= words_to_read;
3220 }
3221
3222 out:
3223 hw->mac.ops.release_swfw_sync(hw, mask);
3224 return status;
3225 }
3226
3227 /**
3228 * ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
3229 * @hw: pointer to hardware structure
3230 * @offset: offset of word in the EEPROM to write
3231 * @data: word to write to the EEPROM
3232 *
3233 * Write a 16 bit word to the EEPROM using the hostif.
3234 **/
3235 s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
3236 u16 data)
3237 {
3238 s32 status;
3239 struct ixgbe_hic_write_shadow_ram buffer;
3240
3241 DEBUGFUNC("ixgbe_write_ee_hostif_data_X550");
3242
3243 buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
3244 buffer.hdr.req.buf_lenh = 0;
3245 buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
3246 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3247
3248 /* one word */
3249 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3250 buffer.data = data;
3251 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3252
3253 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3254 sizeof(buffer),
3255 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
3256
3257 return status;
3258 }
3259
3260 /**
3261 * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
3262 * @hw: pointer to hardware structure
3263 * @offset: offset of word in the EEPROM to write
3264 * @data: word to write to the EEPROM
3265 *
3266 * Write a 16 bit word to the EEPROM using the hostif.
3267 **/
3268 s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
3269 u16 data)
3270 {
3271 s32 status = IXGBE_SUCCESS;
3272
3273 DEBUGFUNC("ixgbe_write_ee_hostif_X550");
3274
3275 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
3276 IXGBE_SUCCESS) {
3277 status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
3278 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3279 } else {
3280 DEBUGOUT("write ee hostif failed to get semaphore");
3281 status = IXGBE_ERR_SWFW_SYNC;
3282 }
3283
3284 return status;
3285 }
3286
3287 /**
3288 * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
3289 * @hw: pointer to hardware structure
3290 * @offset: offset of word in the EEPROM to write
3291 * @words: number of words
3292 * @data: word(s) to write to the EEPROM
3293 *
3294 * Writes one or more 16 bit words to the EEPROM using the hostif.
3295 **/
3296 s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3297 u16 offset, u16 words, u16 *data)
3298 {
3299 s32 status = IXGBE_SUCCESS;
3300 u32 i = 0;
3301
3302 DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550");
3303
3304 /* Take semaphore for the entire operation. */
3305 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3306 if (status != IXGBE_SUCCESS) {
3307 DEBUGOUT("EEPROM write buffer - semaphore failed\n");
3308 goto out;
3309 }
3310
3311 for (i = 0; i < words; i++) {
3312 status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
3313 data[i]);
3314
3315 if (status != IXGBE_SUCCESS) {
3316 DEBUGOUT("Eeprom buffered write failed\n");
3317 break;
3318 }
3319 }
3320
3321 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3322 out:
3323
3324 return status;
3325 }
3326
3327 /**
3328 * ixgbe_checksum_ptr_x550 - Checksum one pointer region
3329 * @hw: pointer to hardware structure
3330 * @ptr: pointer offset in eeprom
3331 * @size: size of section pointed by ptr, if 0 first word will be used as size
3332 * @csum: address of checksum to update
3333 * @buffer: pointer to buffer containing calculated checksum
3334 * @buffer_size: size of buffer
3335 *
3336 * Returns error status for any failure
3337 */
3338 static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
3339 u16 size, u16 *csum, u16 *buffer,
3340 u32 buffer_size)
3341 {
3342 u16 buf[256];
3343 s32 status;
3344 u16 length, bufsz, i, start;
3345 u16 *local_buffer;
3346
3347 bufsz = sizeof(buf) / sizeof(buf[0]);
3348
3349 /* Read a chunk at the pointer location */
3350 if (!buffer) {
3351 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
3352 if (status) {
3353 DEBUGOUT("Failed to read EEPROM image\n");
3354 return status;
3355 }
3356 local_buffer = buf;
3357 } else {
3358 if (buffer_size < ptr)
3359 return IXGBE_ERR_PARAM;
3360 local_buffer = &buffer[ptr];
3361 }
3362
3363 if (size) {
3364 start = 0;
3365 length = size;
3366 } else {
3367 start = 1;
3368 length = local_buffer[0];
3369
3370 /* Skip pointer section if length is invalid. */
3371 if (length == 0xFFFF || length == 0 ||
3372 (ptr + length) >= hw->eeprom.word_size)
3373 return IXGBE_SUCCESS;
3374 }
3375
3376 if (buffer && ((u32)start + (u32)length > buffer_size))
3377 return IXGBE_ERR_PARAM;
3378
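/* Walk the section one word at a time. When reading directly from the
 * EEPROM (no caller-supplied buffer), refill the local chunk each time
 * the index reaches bufsz.
 */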
3379 for (i = start; length; i++, length--) {
3380 if (i == bufsz && !buffer) {
3381 ptr += bufsz;
3382 i = 0;
3383 if (length < bufsz)
3384 bufsz = length;
3385
3386 /* Read a chunk at the pointer location */
3387 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
3388 bufsz, buf);
3389 if (status) {
3390 DEBUGOUT("Failed to read EEPROM image\n");
3391 return status;
3392 }
3393 }
3394 *csum += local_buffer[i];
3395 }
3396 return IXGBE_SUCCESS;
3397 }
3398
3399 /**
3400 * ixgbe_calc_checksum_X550 - Calculates and returns the checksum
3401 * @hw: pointer to hardware structure
3402 * @buffer: pointer to buffer containing calculated checksum
3403 * @buffer_size: size of buffer
3404 *
3405 * Returns a negative error code on error, or the 16-bit checksum
3406 **/
3407 s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
3408 {
3409 u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
3410 u16 *local_buffer;
3411 s32 status;
3412 u16 checksum = 0;
3413 u16 pointer, i, size;
3414
3415 DEBUGFUNC("ixgbe_calc_eeprom_checksum_X550");
3416
3417 hw->eeprom.ops.init_params(hw);
3418
3419 if (!buffer) {
3420 /* Read pointer area */
3421 status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
3422 IXGBE_EEPROM_LAST_WORD + 1,
3423 eeprom_ptrs);
3424 if (status) {
3425 DEBUGOUT("Failed to read EEPROM image\n");
3426 return status;
3427 }
3428 local_buffer = eeprom_ptrs;
3429 } else {
3430 if (buffer_size < IXGBE_EEPROM_LAST_WORD)
3431 return IXGBE_ERR_PARAM;
3432 local_buffer = buffer;
3433 }
3434
3435 /*
3436 * For X550 hardware include 0x0-0x41 in the checksum, skip the
3437 * checksum word itself
3438 */
3439 for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
3440 if (i != IXGBE_EEPROM_CHECKSUM)
3441 checksum += local_buffer[i];
3442
3443 /*
3444 * Include all data from pointers 0x3, 0x6-0xE. This excludes the
3445 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
3446 */
3447 for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
3448 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
3449 continue;
3450
3451 pointer = local_buffer[i];
3452
3453 /* Skip pointer section if the pointer is invalid. */
3454 if (pointer == 0xFFFF || pointer == 0 ||
3455 pointer >= hw->eeprom.word_size)
3456 continue;
3457
3458 switch (i) {
3459 case IXGBE_PCIE_GENERAL_PTR:
3460 size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
3461 break;
3462 case IXGBE_PCIE_CONFIG0_PTR:
3463 case IXGBE_PCIE_CONFIG1_PTR:
3464 size = IXGBE_PCIE_CONFIG_SIZE;
3465 break;
3466 default:
3467 size = 0;
3468 break;
3469 }
3470
3471 status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
3472 buffer, buffer_size);
3473 if (status)
3474 return status;
3475 }
3476
3477 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
3478
3479 return (s32)checksum;
3480 }
3481
3482 /**
3483 * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
3484 * @hw: pointer to hardware structure
3485 *
3486 * Returns a negative error code on error, or the 16-bit checksum
3487 **/
3488 s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
3489 {
3490 return ixgbe_calc_checksum_X550(hw, NULL, 0);
3491 }
3492
3493 /**
3494 * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
3495 * @hw: pointer to hardware structure
3496 * @checksum_val: calculated checksum
3497 *
3498 * Performs checksum calculation and validates the EEPROM checksum. If the
3499 * caller does not need checksum_val, the value can be NULL.
3500 **/
3501 s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
3502 {
3503 s32 status;
3504 u16 checksum;
3505 u16 read_checksum = 0;
3506
3507 DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550");
3508
3509 /* Read the first word from the EEPROM. If this times out or fails, do
3510 * not continue or we could be in for a very long wait while every
3511 * EEPROM read fails
3512 */
3513 status = hw->eeprom.ops.read(hw, 0, &checksum);
3514 if (status) {
3515 DEBUGOUT("EEPROM read failed\n");
3516 return status;
3517 }
3518
3519 status = hw->eeprom.ops.calc_checksum(hw);
3520 if (status < 0)
3521 return status;
3522
3523 checksum = (u16)(status & 0xffff);
3524
3525 status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3526 &read_checksum);
3527 if (status)
3528 return status;
3529
3530 /* Verify read checksum from EEPROM is the same as
3531 * calculated checksum
3532 */
3533 if (read_checksum != checksum) {
3534 status = IXGBE_ERR_EEPROM_CHECKSUM;
3535 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
3536 "Invalid EEPROM checksum");
3537 }
3538
3539 /* If the user cares, return the calculated checksum */
3540 if (checksum_val)
3541 *checksum_val = checksum;
3542
3543 return status;
3544 }
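
/* Usage sketch (illustrative only, not part of the original code): callers
 * typically treat IXGBE_ERR_EEPROM_CHECKSUM as a fatal NVM error and any
 * other non-zero status as an access failure:
 *
 *	u16 csum;
 *
 *	if (ixgbe_validate_eeprom_checksum_X550(hw, &csum) ==
 *	    IXGBE_ERR_EEPROM_CHECKSUM)
 *		DEBUGOUT("NVM image has an invalid checksum\n");
 */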
3545
3546 /**
3547 * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
3548 * @hw: pointer to hardware structure
3549 *
3550  * After writing the EEPROM to shadow RAM using the EEWR register, software
3551  * calculates the checksum, updates the EEPROM checksum word, and instructs
3552  * the hardware to update the flash.
3553 **/
3554 s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
3555 {
3556 s32 status;
3557 u16 checksum = 0;
3558
3559 DEBUGFUNC("ixgbe_update_eeprom_checksum_X550");
3560
3561 /* Read the first word from the EEPROM. If this times out or fails, do
3562 * not continue or we could be in for a very long wait while every
3563 * EEPROM read fails
3564 */
3565 status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
3566 if (status) {
3567 DEBUGOUT("EEPROM read failed\n");
3568 return status;
3569 }
3570
3571 status = ixgbe_calc_eeprom_checksum_X550(hw);
3572 if (status < 0)
3573 return status;
3574
3575 checksum = (u16)(status & 0xffff);
3576
3577 status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3578 checksum);
3579 if (status)
3580 return status;
3581
3582 status = ixgbe_update_flash_X550(hw);
3583
3584 return status;
3585 }
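
/* Usage sketch (illustrative only): after modifying a shadow RAM word, the
 * caller refreshes the checksum, which also triggers the flash update
 * through ixgbe_update_flash_X550(). 'offset' and 'data' below are
 * caller-supplied values, not names defined in this file:
 *
 *	status = ixgbe_write_ee_hostif_X550(hw, offset, data);
 *	if (!status)
 *		status = ixgbe_update_eeprom_checksum_X550(hw);
 */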
3586
3587 /**
3588 * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
3589 * @hw: pointer to hardware structure
3590 *
3591 * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
3592 **/
3593 s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
3594 {
3595 s32 status = IXGBE_SUCCESS;
3596 union ixgbe_hic_hdr2 buffer;
3597
3598 DEBUGFUNC("ixgbe_update_flash_X550");
3599
3600 buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
3601 buffer.req.buf_lenh = 0;
3602 buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
3603 buffer.req.checksum = FW_DEFAULT_CHECKSUM;
3604
3605 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3606 sizeof(buffer),
3607 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
3608
3609 return status;
3610 }
3611
3612 /**
3613 * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type
3614 * @hw: pointer to hardware structure
3615 *
3616 * Determines physical layer capabilities of the current configuration.
3617 **/
3618 u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
3619 {
3620 u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
3621 u16 ext_ability = 0;
3622
3623 DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
3624
3625 hw->phy.ops.identify(hw);
3626
3627 switch (hw->phy.type) {
3628 case ixgbe_phy_x550em_kr:
3629 if (hw->mac.type == ixgbe_mac_X550EM_a) {
3630 if (hw->phy.nw_mng_if_sel &
3631 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
3632 physical_layer =
3633 IXGBE_PHYSICAL_LAYER_2500BASE_KX;
3634 break;
3635 } else if (hw->device_id ==
3636 IXGBE_DEV_ID_X550EM_A_KR_L) {
3637 physical_layer =
3638 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3639 break;
3640 }
3641 }
3642 /* fall through */
3643 case ixgbe_phy_x550em_xfi:
3644 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
3645 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3646 break;
3647 case ixgbe_phy_x550em_kx4:
3648 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
3649 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3650 break;
3651 case ixgbe_phy_x550em_ext_t:
3652 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
3653 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
3654 &ext_ability);
3655 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
3656 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
3657 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
3658 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3659 break;
3660 case ixgbe_phy_fw:
3661 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_1GB_FULL)
3662 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3663 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_100_FULL)
3664 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
3665 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_10_FULL)
3666 physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
3667 break;
3668 case ixgbe_phy_sgmii:
3669 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3670 break;
3671 case ixgbe_phy_ext_1g_t:
3672 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3673 break;
3674 default:
3675 break;
3676 }
3677
3678 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
3679 physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
3680
3681 return physical_layer;
3682 }
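
/* Usage sketch (illustrative only): the return value is a bit mask, so
 * callers test individual IXGBE_PHYSICAL_LAYER_* bits rather than comparing
 * for equality:
 *
 *	u64 layer = ixgbe_get_supported_physical_layer_X550em(hw);
 *
 *	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
 *		DEBUGOUT("10GBASE-T is supported\n");
 */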
3683
3684 /**
3685  * ixgbe_get_bus_info_X550em - Set PCI bus info
3686 * @hw: pointer to hardware structure
3687 *
3688 * Sets bus link width and speed to unknown because X550em is
3689 * not a PCI device.
3690 **/
3691 s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
3692 {
3693
3694 DEBUGFUNC("ixgbe_get_bus_info_x550em");
3695
3696 hw->bus.width = ixgbe_bus_width_unknown;
3697 hw->bus.speed = ixgbe_bus_speed_unknown;
3698
3699 hw->mac.ops.set_lan_id(hw);
3700
3701 return IXGBE_SUCCESS;
3702 }
3703
3704 /**
3705 * ixgbe_disable_rx_x550 - Disable RX unit
3706 * @hw: pointer to hardware structure
3707 *
3708  * Disables the Rx unit for x550
3709 **/
3710 void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
3711 {
3712 u32 rxctrl, pfdtxgswc;
3713 s32 status;
3714 struct ixgbe_hic_disable_rxen fw_cmd;
3715
3716 	DEBUGFUNC("ixgbe_disable_rx_x550");
3717
3718 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3719 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3720 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
3721 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
3722 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
3723 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
3724 hw->mac.set_lben = TRUE;
3725 } else {
3726 hw->mac.set_lben = FALSE;
3727 }
3728
3729 fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
3730 fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
3731 fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
3732 fw_cmd.port_number = (u8)hw->bus.lan_id;
3733
3734 status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
3735 sizeof(struct ixgbe_hic_disable_rxen),
3736 IXGBE_HI_COMMAND_TIMEOUT, TRUE);
3737
3738 /* If we fail - disable RX using register write */
3739 if (status) {
3740 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3741 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3742 rxctrl &= ~IXGBE_RXCTRL_RXEN;
3743 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
3744 }
3745 }
3746 }
3747 }
3748
3749 /**
3750 * ixgbe_enter_lplu_x550em - Transition to low power states
3751 * @hw: pointer to hardware structure
3752 *
3753 * Configures Low Power Link Up on transition to low power states
3754 * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
3755 * X557 PHY immediately prior to entering LPLU.
3756 **/
3757 s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
3758 {
3759 u16 an_10g_cntl_reg, autoneg_reg, speed;
3760 s32 status;
3761 ixgbe_link_speed lcd_speed;
3762 u32 save_autoneg;
3763 bool link_up;
3764
3765 /* SW LPLU not required on later HW revisions. */
3766 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
3767 (IXGBE_FUSES0_REV_MASK &
3768 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
3769 return IXGBE_SUCCESS;
3770
3771 /* If blocked by MNG FW, then don't restart AN */
3772 if (ixgbe_check_reset_blocked(hw))
3773 return IXGBE_SUCCESS;
3774
3775 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3776 if (status != IXGBE_SUCCESS)
3777 return status;
3778
3779 status = ixgbe_read_eeprom(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3);
3780
3781 if (status != IXGBE_SUCCESS)
3782 return status;
3783
3784 	/* If link is down, LPLU is disabled in NVM, or both WoL and manageability
3785 	 * are disabled, then force link down by entering low power mode.
3786 */
3787 if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) ||
3788 !(hw->wol_enabled || ixgbe_mng_present(hw)))
3789 return ixgbe_set_copper_phy_power(hw, FALSE);
3790
3791 /* Determine LCD */
3792 status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed);
3793
3794 if (status != IXGBE_SUCCESS)
3795 return status;
3796
3797 /* If no valid LCD link speed, then force link down and exit. */
3798 if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN)
3799 return ixgbe_set_copper_phy_power(hw, FALSE);
3800
3801 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3802 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3803 &speed);
3804
3805 if (status != IXGBE_SUCCESS)
3806 return status;
3807
3808 /* If no link now, speed is invalid so take link down */
3809 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3810 if (status != IXGBE_SUCCESS)
3811 return ixgbe_set_copper_phy_power(hw, FALSE);
3812
3813 /* clear everything but the speed bits */
3814 speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK;
3815
3816 /* If current speed is already LCD, then exit. */
3817 if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) &&
3818 (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
3819 ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
3820 (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
3821 return status;
3822
3823 /* Clear AN completed indication */
3824 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
3825 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3826 &autoneg_reg);
3827
3828 if (status != IXGBE_SUCCESS)
3829 return status;
3830
3831 status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
3832 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3833 &an_10g_cntl_reg);
3834
3835 if (status != IXGBE_SUCCESS)
3836 return status;
3837
3838 status = hw->phy.ops.read_reg(hw,
3839 IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
3840 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3841 &autoneg_reg);
3842
3843 if (status != IXGBE_SUCCESS)
3844 return status;
3845
3846 save_autoneg = hw->phy.autoneg_advertised;
3847
3848 /* Setup link at least common link speed */
3849 status = hw->mac.ops.setup_link(hw, lcd_speed, FALSE);
3850
3851 /* restore autoneg from before setting lplu speed */
3852 hw->phy.autoneg_advertised = save_autoneg;
3853
3854 return status;
3855 }
3856
3857 /**
3858  * ixgbe_get_lcd_t_x550em - Determine lowest common denominator
3859 * @hw: pointer to hardware structure
3860 * @lcd_speed: pointer to lowest common link speed
3861 *
3862 * Determine lowest common link speed with link partner.
3863 **/
3864 s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed)
3865 {
3866 u16 an_lp_status;
3867 s32 status;
3868 u16 word = hw->eeprom.ctrl_word_3;
3869
3870 *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
3871
3872 status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS,
3873 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3874 &an_lp_status);
3875
3876 if (status != IXGBE_SUCCESS)
3877 return status;
3878
3879 /* If link partner advertised 1G, return 1G */
3880 if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
3881 *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
3882 return status;
3883 }
3884
3885 /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */
3886 if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
3887 (word & NVM_INIT_CTRL_3_D10GMP_PORT0))
3888 return status;
3889
3890 /* Link partner not capable of lower speeds, return 10G */
3891 *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
3892 return status;
3893 }
3894
3895 /**
3896 * ixgbe_setup_fc_X550em - Set up flow control
3897 * @hw: pointer to hardware structure
3898 *
3899 * Called at init time to set up flow control.
3900 **/
3901 s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
3902 {
3903 s32 ret_val = IXGBE_SUCCESS;
3904 u32 pause, asm_dir, reg_val;
3905
3906 DEBUGFUNC("ixgbe_setup_fc_X550em");
3907
3908 /* Validate the requested mode */
3909 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
3910 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
3911 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
3912 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
3913 goto out;
3914 }
3915
3916 /* 10gig parts do not have a word in the EEPROM to determine the
3917 * default flow control setting, so we explicitly set it to full.
3918 */
3919 if (hw->fc.requested_mode == ixgbe_fc_default)
3920 hw->fc.requested_mode = ixgbe_fc_full;
3921
3922 /* Determine PAUSE and ASM_DIR bits. */
3923 switch (hw->fc.requested_mode) {
3924 case ixgbe_fc_none:
3925 pause = 0;
3926 asm_dir = 0;
3927 break;
3928 case ixgbe_fc_tx_pause:
3929 pause = 0;
3930 asm_dir = 1;
3931 break;
3932 case ixgbe_fc_rx_pause:
3933 /* Rx Flow control is enabled and Tx Flow control is
3934 * disabled by software override. Since there really
3935 * isn't a way to advertise that we are capable of RX
3936 * Pause ONLY, we will advertise that we support both
3937 * symmetric and asymmetric Rx PAUSE, as such we fall
3938 * through to the fc_full statement. Later, we will
3939 * disable the adapter's ability to send PAUSE frames.
3940 */
3941 case ixgbe_fc_full:
3942 pause = 1;
3943 asm_dir = 1;
3944 break;
3945 default:
3946 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
3947 "Flow control param set incorrectly\n");
3948 ret_val = IXGBE_ERR_CONFIG;
3949 goto out;
3950 }
3951
3952 switch (hw->device_id) {
3953 case IXGBE_DEV_ID_X550EM_X_KR:
3954 case IXGBE_DEV_ID_X550EM_A_KR:
3955 case IXGBE_DEV_ID_X550EM_A_KR_L:
3956 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
3957 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
3958 				IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3959 if (ret_val != IXGBE_SUCCESS)
3960 goto out;
3961 reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
3962 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
3963 if (pause)
3964 reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
3965 if (asm_dir)
3966 reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
3967 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
3968 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
3969 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3970
3971 /* This device does not fully support AN. */
3972 hw->fc.disable_fc_autoneg = TRUE;
3973 break;
3974 case IXGBE_DEV_ID_X550EM_X_XFI:
3975 hw->fc.disable_fc_autoneg = TRUE;
3976 break;
3977 default:
3978 break;
3979 }
3980
3981 out:
3982 return ret_val;
3983 }
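
/* For reference (summary added for clarity, not from the original sources):
 * the PAUSE/ASM_DIR advertisement bits written above follow the IEEE 802.3
 * Annex 28B encoding:
 *
 *	requested_mode		PAUSE	ASM_DIR
 *	ixgbe_fc_none		  0	   0
 *	ixgbe_fc_tx_pause	  0	   1
 *	ixgbe_fc_rx_pause	  1	   1	(advertised together with full)
 *	ixgbe_fc_full		  1	   1
 */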
3984
3985 /**
3986 * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37
3987 * @hw: pointer to hardware structure
3988 *
3989 * Enable flow control according to IEEE clause 37.
3990 **/
3991 void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
3992 {
3993 u32 link_s1, lp_an_page_low, an_cntl_1;
3994 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
3995 ixgbe_link_speed speed;
3996 bool link_up;
3997
3998 /* AN should have completed when the cable was plugged in.
3999 * Look for reasons to bail out. Bail out if:
4000 * - FC autoneg is disabled, or if
4001 * - link is not up.
4002 */
4003 if (hw->fc.disable_fc_autoneg) {
4004 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4005 "Flow control autoneg is disabled");
4006 goto out;
4007 }
4008
4009 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
4010 if (!link_up) {
4011 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4012 goto out;
4013 }
4014
4015 	/* Check that auto-negotiation has completed */
4016 status = hw->mac.ops.read_iosf_sb_reg(hw,
4017 IXGBE_KRM_LINK_S1(hw->bus.lan_id),
4018 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1);
4019
4020 if (status != IXGBE_SUCCESS ||
4021 (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
4022 DEBUGOUT("Auto-Negotiation did not complete\n");
4023 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4024 goto out;
4025 }
4026
4027 /* Read the 10g AN autoc and LP ability registers and resolve
4028 * local flow control settings accordingly
4029 */
4030 status = hw->mac.ops.read_iosf_sb_reg(hw,
4031 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4032 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1);
4033
4034 if (status != IXGBE_SUCCESS) {
4035 DEBUGOUT("Auto-Negotiation did not complete\n");
4036 goto out;
4037 }
4038
4039 status = hw->mac.ops.read_iosf_sb_reg(hw,
4040 IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id),
4041 IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low);
4042
4043 if (status != IXGBE_SUCCESS) {
4044 DEBUGOUT("Auto-Negotiation did not complete\n");
4045 goto out;
4046 }
4047
4048 status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low,
4049 IXGBE_KRM_AN_CNTL_1_SYM_PAUSE,
4050 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE,
4051 IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE,
4052 IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE);
4053
4054 out:
4055 if (status == IXGBE_SUCCESS) {
4056 hw->fc.fc_was_autonegged = TRUE;
4057 } else {
4058 hw->fc.fc_was_autonegged = FALSE;
4059 hw->fc.current_mode = hw->fc.requested_mode;
4060 }
4061 }
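
/* For reference (rough summary of the generic resolution performed by
 * ixgbe_negotiate_fc() in ixgbe_common.c; see that file for the
 * authoritative logic):
 *
 *	local SYM && LP SYM                          -> full, or rx_pause if
 *	                                                only rx_pause was requested
 *	!local SYM && local ASM && LP SYM && LP ASM  -> tx_pause
 *	local SYM && local ASM && !LP SYM && LP ASM  -> rx_pause
 *	otherwise                                    -> none
 */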
4062
4063 /**
4064 * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings
4065 * @hw: pointer to hardware structure
4066  * Flow control is not negotiated here; the requested mode is applied as the current mode.
4067 **/
4068 void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
4069 {
4070 hw->fc.fc_was_autonegged = FALSE;
4071 hw->fc.current_mode = hw->fc.requested_mode;
4072 }
4073
4074 /**
4075 * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37
4076 * @hw: pointer to hardware structure
4077 *
4078 * Enable flow control according to IEEE clause 37.
4079 **/
4080 void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
4081 {
4082 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4083 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
4084 ixgbe_link_speed speed;
4085 bool link_up;
4086
4087 /* AN should have completed when the cable was plugged in.
4088 * Look for reasons to bail out. Bail out if:
4089 * - FC autoneg is disabled, or if
4090 * - link is not up.
4091 */
4092 if (hw->fc.disable_fc_autoneg) {
4093 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4094 "Flow control autoneg is disabled");
4095 goto out;
4096 }
4097
4098 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
4099 if (!link_up) {
4100 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4101 goto out;
4102 }
4103
4104 /* Check if auto-negotiation has completed */
4105 status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
4106 if (status != IXGBE_SUCCESS ||
4107 !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
4108 DEBUGOUT("Auto-Negotiation did not complete\n");
4109 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4110 goto out;
4111 }
4112
4113 /* Negotiate the flow control */
4114 status = ixgbe_negotiate_fc(hw, info[0], info[0],
4115 FW_PHY_ACT_GET_LINK_INFO_FC_RX,
4116 FW_PHY_ACT_GET_LINK_INFO_FC_TX,
4117 FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX,
4118 FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX);
4119
4120 out:
4121 if (status == IXGBE_SUCCESS) {
4122 hw->fc.fc_was_autonegged = TRUE;
4123 } else {
4124 hw->fc.fc_was_autonegged = FALSE;
4125 hw->fc.current_mode = hw->fc.requested_mode;
4126 }
4127 }
4128
4129 /**
4130 * ixgbe_setup_fc_backplane_x550em_a - Set up flow control
4131 * @hw: pointer to hardware structure
4132 *
4133 * Called at init time to set up flow control.
4134 **/
4135 s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
4136 {
4137 s32 status = IXGBE_SUCCESS;
4138 u32 an_cntl = 0;
4139
4140 DEBUGFUNC("ixgbe_setup_fc_backplane_x550em_a");
4141
4142 /* Validate the requested mode */
4143 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
4144 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4145 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
4146 return IXGBE_ERR_INVALID_LINK_SETTINGS;
4147 }
4148
4149 if (hw->fc.requested_mode == ixgbe_fc_default)
4150 hw->fc.requested_mode = ixgbe_fc_full;
4151
4152 /* Set up the 1G and 10G flow control advertisement registers so the
4153 * HW will be able to do FC autoneg once the cable is plugged in. If
4154 * we link at 10G, the 1G advertisement is harmless and vice versa.
4155 */
4156 status = hw->mac.ops.read_iosf_sb_reg(hw,
4157 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4158 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl);
4159
4160 if (status != IXGBE_SUCCESS) {
4161 DEBUGOUT("Auto-Negotiation did not complete\n");
4162 return status;
4163 }
4164
4165 /* The possible values of fc.requested_mode are:
4166 * 0: Flow control is completely disabled
4167 * 1: Rx flow control is enabled (we can receive pause frames,
4168 * but not send pause frames).
4169 * 2: Tx flow control is enabled (we can send pause frames but
4170 * we do not support receiving pause frames).
4171 * 3: Both Rx and Tx flow control (symmetric) are enabled.
4172 * other: Invalid.
4173 */
4174 switch (hw->fc.requested_mode) {
4175 case ixgbe_fc_none:
4176 /* Flow control completely disabled by software override. */
4177 an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4178 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
4179 break;
4180 case ixgbe_fc_tx_pause:
4181 /* Tx Flow control is enabled, and Rx Flow control is
4182 * disabled by software override.
4183 */
4184 an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4185 an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
4186 break;
4187 case ixgbe_fc_rx_pause:
4188 /* Rx Flow control is enabled and Tx Flow control is
4189 * disabled by software override. Since there really
4190 * isn't a way to advertise that we are capable of RX
4191 * Pause ONLY, we will advertise that we support both
4192 * symmetric and asymmetric Rx PAUSE, as such we fall
4193 * through to the fc_full statement. Later, we will
4194 * disable the adapter's ability to send PAUSE frames.
4195 */
4196 case ixgbe_fc_full:
4197 /* Flow control (both Rx and Tx) is enabled by SW override. */
4198 an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4199 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4200 break;
4201 default:
4202 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
4203 "Flow control param set incorrectly\n");
4204 return IXGBE_ERR_CONFIG;
4205 }
4206
4207 status = hw->mac.ops.write_iosf_sb_reg(hw,
4208 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4209 IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl);
4210
4211 /* Restart auto-negotiation. */
4212 status = ixgbe_restart_an_internal_phy_x550em(hw);
4213
4214 return status;
4215 }
4216
4217 /**
4218 * ixgbe_set_mux - Set mux for port 1 access with CS4227
4219 * @hw: pointer to hardware structure
4220 * @state: set mux if 1, clear if 0
4221 */
4222 static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
4223 {
4224 u32 esdp;
4225
4226 if (!hw->bus.lan_id)
4227 return;
4228 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4229 if (state)
4230 esdp |= IXGBE_ESDP_SDP1;
4231 else
4232 esdp &= ~IXGBE_ESDP_SDP1;
4233 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4234 IXGBE_WRITE_FLUSH(hw);
4235 }
4236
4237 /**
4238 * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore
4239 * @hw: pointer to hardware structure
4240 * @mask: Mask to specify which semaphore to acquire
4241 *
4242 * Acquires the SWFW semaphore and sets the I2C MUX
4243 **/
4244 s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4245 {
4246 s32 status;
4247
4248 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550em");
4249
4250 status = ixgbe_acquire_swfw_sync_X540(hw, mask);
4251 if (status)
4252 return status;
4253
4254 if (mask & IXGBE_GSSR_I2C_MASK)
4255 ixgbe_set_mux(hw, 1);
4256
4257 return IXGBE_SUCCESS;
4258 }
4259
4260 /**
4261 * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore
4262 * @hw: pointer to hardware structure
4263 * @mask: Mask to specify which semaphore to release
4264 *
4265  * Releases the SWFW semaphore and clears the I2C MUX
4266 **/
4267 void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4268 {
4269 DEBUGFUNC("ixgbe_release_swfw_sync_X550em");
4270
4271 if (mask & IXGBE_GSSR_I2C_MASK)
4272 ixgbe_set_mux(hw, 0);
4273
4274 ixgbe_release_swfw_sync_X540(hw, mask);
4275 }
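
/* Usage sketch (illustrative only): the acquire/release pair brackets any
 * access that needs the shared resource, for example an I2C transaction:
 *
 *	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_I2C_MASK))
 *		return IXGBE_ERR_SWFW_SYNC;
 *	... perform the I2C access ...
 *	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_I2C_MASK);
 */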
4276
4277 /**
4278 * ixgbe_acquire_swfw_sync_X550a - Acquire SWFW semaphore
4279 * @hw: pointer to hardware structure
4280 * @mask: Mask to specify which semaphore to acquire
4281 *
4282  * Acquires the SWFW semaphore and gets the shared PHY token as needed
4283 */
4284 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4285 {
4286 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4287 int retries = FW_PHY_TOKEN_RETRIES;
4288 s32 status = IXGBE_SUCCESS;
4289
4290 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a");
4291
4292 while (--retries) {
4293 status = IXGBE_SUCCESS;
4294 if (hmask)
4295 status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
4296 if (status) {
4297 DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n",
4298 status);
4299 return status;
4300 }
4301 if (!(mask & IXGBE_GSSR_TOKEN_SM))
4302 return IXGBE_SUCCESS;
4303
4304 status = ixgbe_get_phy_token(hw);
4305 if (status == IXGBE_ERR_TOKEN_RETRY)
4306 DEBUGOUT1("Could not acquire PHY token, Status = %d\n",
4307 status);
4308
4309 if (status == IXGBE_SUCCESS)
4310 return IXGBE_SUCCESS;
4311
4312 if (hmask)
4313 ixgbe_release_swfw_sync_X540(hw, hmask);
4314
4315 if (status != IXGBE_ERR_TOKEN_RETRY) {
4316 DEBUGOUT1("Unable to retry acquiring the PHY token, Status = %d\n",
4317 status);
4318 return status;
4319 }
4320 }
4321
4322 DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n",
4323 hw->phy.id);
4324 return status;
4325 }
4326
4327 /**
4328 * ixgbe_release_swfw_sync_X550a - Release SWFW semaphore
4329 * @hw: pointer to hardware structure
4330 * @mask: Mask to specify which semaphore to release
4331 *
4332  * Releases the SWFW semaphore and puts back the shared PHY token as needed
4333 */
4334 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4335 {
4336 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4337
4338 DEBUGFUNC("ixgbe_release_swfw_sync_X550a");
4339
4340 if (mask & IXGBE_GSSR_TOKEN_SM)
4341 ixgbe_put_phy_token(hw);
4342
4343 if (hmask)
4344 ixgbe_release_swfw_sync_X540(hw, hmask);
4345 }
4346
4347 /**
4348 * ixgbe_read_phy_reg_x550a - Reads specified PHY register
4349 * @hw: pointer to hardware structure
4350 * @reg_addr: 32 bit address of PHY register to read
4351 * @device_type: 5 bit device type
4352 * @phy_data: Pointer to read data from PHY register
4353 *
4354 * Reads a value from a specified PHY register using the SWFW lock and PHY
4355  * Token. The PHY Token is needed since the MDIO is shared between two MAC
4356 * instances.
4357 **/
4358 s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4359 u32 device_type, u16 *phy_data)
4360 {
4361 s32 status;
4362 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4363
4364 DEBUGFUNC("ixgbe_read_phy_reg_x550a");
4365
4366 if (hw->mac.ops.acquire_swfw_sync(hw, mask))
4367 return IXGBE_ERR_SWFW_SYNC;
4368
4369 status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
4370
4371 hw->mac.ops.release_swfw_sync(hw, mask);
4372
4373 return status;
4374 }
4375
4376 /**
4377 * ixgbe_write_phy_reg_x550a - Writes specified PHY register
4378 * @hw: pointer to hardware structure
4379 * @reg_addr: 32 bit PHY register to write
4380 * @device_type: 5 bit device type
4381 * @phy_data: Data to write to the PHY register
4382 *
4383 * Writes a value to specified PHY register using the SWFW lock and PHY Token.
4384  * The PHY Token is needed since the MDIO is shared between two MAC instances.
4385 **/
4386 s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4387 u32 device_type, u16 phy_data)
4388 {
4389 s32 status;
4390 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4391
4392 DEBUGFUNC("ixgbe_write_phy_reg_x550a");
4393
4394 if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) {
4395 status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type,
4396 phy_data);
4397 hw->mac.ops.release_swfw_sync(hw, mask);
4398 } else {
4399 status = IXGBE_ERR_SWFW_SYNC;
4400 }
4401
4402 return status;
4403 }
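
/* Usage sketch (illustrative only): a typical read-modify-write of an
 * external PHY register through the token-protected accessors. 'reg_addr',
 * 'dev_type' and 'some_bit' are caller-supplied/hypothetical names:
 *
 *	u16 reg;
 *
 *	status = ixgbe_read_phy_reg_x550a(hw, reg_addr, dev_type, &reg);
 *	if (status == IXGBE_SUCCESS) {
 *		reg |= some_bit;
 *		status = ixgbe_write_phy_reg_x550a(hw, reg_addr, dev_type, reg);
 *	}
 */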
4404
4405 /**
4406 * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
4407 * @hw: pointer to hardware structure
4408 *
4409 * Handle external Base T PHY interrupt. If high temperature
4410 * failure alarm then return error, else if link status change
4411 * then setup internal/external PHY link
4412 *
4413 * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
4414 * failure alarm, else return PHY access status.
4415 */
4416 s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
4417 {
4418 bool lsc;
4419 u32 status;
4420
4421 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
4422
4423 if (status != IXGBE_SUCCESS)
4424 return status;
4425
4426 if (lsc)
4427 return ixgbe_setup_internal_phy(hw);
4428
4429 return IXGBE_SUCCESS;
4430 }
4431
4432 /**
4433 * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
4434 * @hw: pointer to hardware structure
4435 * @speed: new link speed
4436 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
4437 *
4438 * Setup internal/external PHY link speed based on link speed, then set
4439 * external PHY auto advertised link speed.
4440 *
4441 * Returns error status for any failure
4442 **/
4443 s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
4444 ixgbe_link_speed speed,
4445 bool autoneg_wait_to_complete)
4446 {
4447 s32 status;
4448 ixgbe_link_speed force_speed;
4449
4450 DEBUGFUNC("ixgbe_setup_mac_link_t_X550em");
4451
4452 	/* Set up the internal/external PHY link speed to iXFI (10G), unless
4453 	 * only 1G is auto-advertised, in which case set up a KX link.
4454 */
4455 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
4456 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
4457 else
4458 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
4459
4460 /* If X552 and internal link mode is XFI, then setup XFI internal link.
4461 */
4462 if (hw->mac.type == ixgbe_mac_X550EM_x &&
4463 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
4464 status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
4465
4466 if (status != IXGBE_SUCCESS)
4467 return status;
4468 }
4469
4470 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete);
4471 }
4472
4473 /**
4474 * ixgbe_check_link_t_X550em - Determine link and speed status
4475 * @hw: pointer to hardware structure
4476 * @speed: pointer to link speed
4477 * @link_up: TRUE when link is up
4478 * @link_up_wait_to_complete: bool used to wait for link up or not
4479 *
4480 * Check that both the MAC and X557 external PHY have link.
4481 **/
4482 s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4483 bool *link_up, bool link_up_wait_to_complete)
4484 {
4485 u32 status;
4486 u16 i, autoneg_status = 0;
4487
4488 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
4489 return IXGBE_ERR_CONFIG;
4490
4491 status = ixgbe_check_mac_link_generic(hw, speed, link_up,
4492 link_up_wait_to_complete);
4493
4494 /* If check link fails or MAC link is not up, then return */
4495 if (status != IXGBE_SUCCESS || !(*link_up))
4496 return status;
4497
4498 /* MAC link is up, so check external PHY link.
4499 	 * The X557 PHY link status is latching low and can only be used to
4500 	 * detect a link drop, not the current link state, unless back-to-back
4501 	 * reads are performed.
4502 */
4503 for (i = 0; i < 2; i++) {
4504 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
4505 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4506 &autoneg_status);
4507
4508 if (status != IXGBE_SUCCESS)
4509 return status;
4510 }
4511
4512 /* If external PHY link is not up, then indicate link not up */
4513 if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
4514 *link_up = FALSE;
4515
4516 return IXGBE_SUCCESS;
4517 }
4518
4519 /**
4520 * ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
4521 * @hw: pointer to hardware structure
4522 **/
4523 s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
4524 {
4525 s32 status;
4526
4527 status = ixgbe_reset_phy_generic(hw);
4528
4529 if (status != IXGBE_SUCCESS)
4530 return status;
4531
4532 /* Configure Link Status Alarm and Temperature Threshold interrupts */
4533 return ixgbe_enable_lasi_ext_t_x550em(hw);
4534 }
4535
4536 /**
4537 * ixgbe_led_on_t_X550em - Turns on the software controllable LEDs.
4538 * @hw: pointer to hardware structure
4539 * @led_idx: led number to turn on
4540 **/
4541 s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4542 {
4543 u16 phy_data;
4544
4545 DEBUGFUNC("ixgbe_led_on_t_X550em");
4546
4547 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4548 return IXGBE_ERR_PARAM;
4549
4550 /* To turn on the LED, set mode to ON. */
4551 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4552 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4553 phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK;
4554 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4555 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4556
4557 /* Some designs have the LEDs wired to the MAC */
4558 return ixgbe_led_on_generic(hw, led_idx);
4559 }
4560
4561 /**
4562 * ixgbe_led_off_t_X550em - Turns off the software controllable LEDs.
4563 * @hw: pointer to hardware structure
4564 * @led_idx: led number to turn off
4565 **/
4566 s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4567 {
4568 u16 phy_data;
4569
4570 DEBUGFUNC("ixgbe_led_off_t_X550em");
4571
4572 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4573 return IXGBE_ERR_PARAM;
4574
4575 	/* To turn off the LED, set mode to OFF. */
4576 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4577 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4578 phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
4579 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4580 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4581
4582 /* Some designs have the LEDs wired to the MAC */
4583 return ixgbe_led_off_generic(hw, led_idx);
4584 }
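
/* Usage sketch (illustrative only): a simple port-identify blink can be
 * built on these helpers; msec_delay() comes from the OS support layer:
 *
 *	ixgbe_led_on_t_X550em(hw, led_idx);
 *	msec_delay(500);
 *	ixgbe_led_off_t_X550em(hw, led_idx);
 */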
4585
4586 /**
4587 * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware
4588 * @hw: pointer to the HW structure
4589 * @maj: driver version major number
4590 * @min: driver version minor number
4591 * @build: driver version build number
4592 * @sub: driver version sub build number
4593 * @len: length of driver_ver string
4594 * @driver_ver: driver string
4595 *
4596 * Sends driver version number to firmware through the manageability
4597 * block. On success return IXGBE_SUCCESS
4598 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4599 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4600 **/
4601 s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
4602 u8 build, u8 sub, u16 len, const char *driver_ver)
4603 {
4604 struct ixgbe_hic_drv_info2 fw_cmd;
4605 s32 ret_val = IXGBE_SUCCESS;
4606 int i;
4607
4608 DEBUGFUNC("ixgbe_set_fw_drv_ver_x550");
4609
4610 if ((len == 0) || (driver_ver == NULL) ||
4611 (len > sizeof(fw_cmd.driver_string)))
4612 return IXGBE_ERR_INVALID_ARGUMENT;
4613
4614 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4615 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
4616 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4617 fw_cmd.port_num = (u8)hw->bus.func;
4618 fw_cmd.ver_maj = maj;
4619 fw_cmd.ver_min = min;
4620 fw_cmd.ver_build = build;
4621 fw_cmd.ver_sub = sub;
4622 fw_cmd.hdr.checksum = 0;
4623 memcpy(fw_cmd.driver_string, driver_ver, len);
4624 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4625 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4626
4627 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4628 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4629 sizeof(fw_cmd),
4630 IXGBE_HI_COMMAND_TIMEOUT,
4631 TRUE);
4632 if (ret_val != IXGBE_SUCCESS)
4633 continue;
4634
4635 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4636 FW_CEM_RESP_STATUS_SUCCESS)
4637 ret_val = IXGBE_SUCCESS;
4638 else
4639 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4640
4641 break;
4642 }
4643
4644 return ret_val;
4645 }
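
/* Usage sketch (illustrative only): the driver reports its version once at
 * load time. 'ver_str' and the maj/min/build/sub values below are
 * caller-supplied, not names defined in this file:
 *
 *	static const char ver_str[] = "<driver version string>";
 *
 *	ixgbe_set_fw_drv_ver_x550(hw, maj, min, build, sub,
 *				  sizeof(ver_str), ver_str);
 */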
4646
4647 /**
4648 * ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode
4649  * @hw: pointer to hardware structure
4650 *
4651 * Returns TRUE if in FW NVM recovery mode.
4652 **/
4653 bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
4654 {
4655 u32 fwsm;
4656
4657 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
4658
4659 return !!(fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE);
4660 }
4661
4662