1 /****************************************************************************** 2 3 Copyright (c) 2001-2011, Intel Corporation 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/
/*$FreeBSD$*/

/*
 * 82575EB Gigabit Network Connection
 * 82575EB Gigabit Backplane Connection
 * 82575GB Gigabit Network Connection
 * 82576 Gigabit Network Connection
 * 82576 Quad Port Gigabit Mezzanine Adapter
 * 82580 Gigabit Network Connection
 * I350 Gigabit Network Connection
 */

#include "e1000_api.h"
#include "e1000_i210.h"

/*
 * Forward declarations for the file-local helpers that are installed as
 * function pointers by the init_params routines below.
 */
static s32 e1000_init_phy_params_82575(struct e1000_hw *hw);
static s32 e1000_init_mac_params_82575(struct e1000_hw *hw);
static s32 e1000_acquire_phy_82575(struct e1000_hw *hw);
static void e1000_release_phy_82575(struct e1000_hw *hw);
static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw);
static void e1000_release_nvm_82575(struct e1000_hw *hw);
static s32 e1000_check_for_link_82575(struct e1000_hw *hw);
static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw);
static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw);
static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
					u16 *duplex);
static s32 e1000_init_hw_82575(struct e1000_hw *hw);
static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw);
static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
					  u16 *data);
static s32 e1000_reset_hw_82575(struct e1000_hw *hw);
static s32 e1000_reset_hw_82580(struct e1000_hw *hw);
static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw,
				    u32 offset, u16 *data);
static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw,
				     u32 offset, u16 data);
static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw,
					 bool active);
static s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw,
					 bool active);
static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw,
					 bool active);
static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw);
static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw);
static s32 e1000_get_media_type_82575(struct e1000_hw *hw);
static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw);
static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data);
static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw,
					   u32 offset, u16 data);
static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw);
static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
						u16 *speed, u16 *duplex);
static s32 e1000_get_phy_id_82575(struct e1000_hw *hw);
static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
static s32 e1000_reset_init_script_82575(struct e1000_hw *hw);
static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw);
static void e1000_config_collision_dist_82575(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw);
static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw);
static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw);
static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw);
static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw);
static s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw,
						 u16 offset);
static s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
						   u16 offset);
static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw);
static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw);
static void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value);
static void e1000_clear_vfta_i350(struct e1000_hw *hw);

/* Bit-banged I2C helpers used to reach SFP modules over the I2C pins. */
static void e1000_i2c_start(struct e1000_hw *hw);
static void e1000_i2c_stop(struct e1000_hw *hw);
static s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data); 111 static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data); 112 static s32 e1000_get_i2c_ack(struct e1000_hw *hw); 113 static s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data); 114 static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data); 115 static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl); 116 static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl); 117 static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data); 118 static bool e1000_get_i2c_data(u32 *i2cctl); 119 120 static const u16 e1000_82580_rxpbs_table[] = { 121 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; 122 #define E1000_82580_RXPBS_TABLE_SIZE \ 123 (sizeof(e1000_82580_rxpbs_table)/sizeof(u16)) 124 125 126 /** 127 * e1000_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO 128 * @hw: pointer to the HW structure 129 * 130 * Called to determine if the I2C pins are being used for I2C or as an 131 * external MDIO interface since the two options are mutually exclusive. 132 **/ 133 static bool e1000_sgmii_uses_mdio_82575(struct e1000_hw *hw) 134 { 135 u32 reg = 0; 136 bool ext_mdio = FALSE; 137 138 DEBUGFUNC("e1000_sgmii_uses_mdio_82575"); 139 140 switch (hw->mac.type) { 141 case e1000_82575: 142 case e1000_82576: 143 reg = E1000_READ_REG(hw, E1000_MDIC); 144 ext_mdio = !!(reg & E1000_MDIC_DEST); 145 break; 146 case e1000_82580: 147 case e1000_i350: 148 case e1000_i354: 149 case e1000_i210: 150 case e1000_i211: 151 reg = E1000_READ_REG(hw, E1000_MDICNFG); 152 ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); 153 break; 154 default: 155 break; 156 } 157 return ext_mdio; 158 } 159 160 /** 161 * e1000_init_phy_params_82575 - Init PHY func ptrs. 
162 * @hw: pointer to the HW structure 163 **/ 164 static s32 e1000_init_phy_params_82575(struct e1000_hw *hw) 165 { 166 struct e1000_phy_info *phy = &hw->phy; 167 s32 ret_val = E1000_SUCCESS; 168 u32 ctrl_ext; 169 170 DEBUGFUNC("e1000_init_phy_params_82575"); 171 172 phy->ops.read_i2c_byte = e1000_read_i2c_byte_generic; 173 phy->ops.write_i2c_byte = e1000_write_i2c_byte_generic; 174 175 if (hw->phy.media_type != e1000_media_type_copper) { 176 phy->type = e1000_phy_none; 177 goto out; 178 } 179 180 phy->ops.power_up = e1000_power_up_phy_copper; 181 phy->ops.power_down = e1000_power_down_phy_copper_82575; 182 183 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 184 phy->reset_delay_us = 100; 185 186 phy->ops.acquire = e1000_acquire_phy_82575; 187 phy->ops.check_reset_block = e1000_check_reset_block_generic; 188 phy->ops.commit = e1000_phy_sw_reset_generic; 189 phy->ops.get_cfg_done = e1000_get_cfg_done_82575; 190 phy->ops.release = e1000_release_phy_82575; 191 192 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 193 194 if (e1000_sgmii_active_82575(hw)) { 195 phy->ops.reset = e1000_phy_hw_reset_sgmii_82575; 196 ctrl_ext |= E1000_CTRL_I2C_ENA; 197 } else { 198 phy->ops.reset = e1000_phy_hw_reset_generic; 199 ctrl_ext &= ~E1000_CTRL_I2C_ENA; 200 } 201 202 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); 203 e1000_reset_mdicnfg_82580(hw); 204 205 if (e1000_sgmii_active_82575(hw) && !e1000_sgmii_uses_mdio_82575(hw)) { 206 phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575; 207 phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575; 208 } else { 209 switch (hw->mac.type) { 210 case e1000_82580: 211 case e1000_i350: 212 case e1000_i354: 213 phy->ops.read_reg = e1000_read_phy_reg_82580; 214 phy->ops.write_reg = e1000_write_phy_reg_82580; 215 break; 216 case e1000_i210: 217 case e1000_i211: 218 phy->ops.read_reg = e1000_read_phy_reg_gs40g; 219 phy->ops.write_reg = e1000_write_phy_reg_gs40g; 220 break; 221 default: 222 phy->ops.read_reg = e1000_read_phy_reg_igp; 223 
phy->ops.write_reg = e1000_write_phy_reg_igp; 224 } 225 } 226 227 /* Set phy->phy_addr and phy->id. */ 228 ret_val = e1000_get_phy_id_82575(hw); 229 230 /* Verify phy id and set remaining function pointers */ 231 switch (phy->id) { 232 case M88E1545_E_PHY_ID: 233 case I347AT4_E_PHY_ID: 234 case M88E1112_E_PHY_ID: 235 case M88E1340M_E_PHY_ID: 236 case M88E1111_I_PHY_ID: 237 phy->type = e1000_phy_m88; 238 phy->ops.check_polarity = e1000_check_polarity_m88; 239 phy->ops.get_info = e1000_get_phy_info_m88; 240 if (phy->id == I347AT4_E_PHY_ID || 241 phy->id == M88E1112_E_PHY_ID || 242 phy->id == M88E1340M_E_PHY_ID) 243 phy->ops.get_cable_length = 244 e1000_get_cable_length_m88_gen2; 245 else if (phy->id == M88E1545_E_PHY_ID) 246 phy->ops.get_cable_length = 247 e1000_get_cable_length_m88_gen2; 248 else 249 phy->ops.get_cable_length = e1000_get_cable_length_m88; 250 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; 251 /* Check if this PHY is confgured for media swap. */ 252 if (phy->id == M88E1112_E_PHY_ID) { 253 u16 data; 254 255 ret_val = phy->ops.write_reg(hw, 256 E1000_M88E1112_PAGE_ADDR, 257 2); 258 if (ret_val) 259 goto out; 260 261 ret_val = phy->ops.read_reg(hw, 262 E1000_M88E1112_MAC_CTRL_1, 263 &data); 264 if (ret_val) 265 goto out; 266 267 if (data == E1000_M88E1112_AUTO_A || 268 data == E1000_M88E1112_AUTO_B) 269 hw->mac.ops.check_for_link = 270 e1000_check_for_link_media_swap; 271 } 272 break; 273 case IGP03E1000_E_PHY_ID: 274 case IGP04E1000_E_PHY_ID: 275 phy->type = e1000_phy_igp_3; 276 phy->ops.check_polarity = e1000_check_polarity_igp; 277 phy->ops.get_info = e1000_get_phy_info_igp; 278 phy->ops.get_cable_length = e1000_get_cable_length_igp_2; 279 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp; 280 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575; 281 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic; 282 break; 283 case I82580_I_PHY_ID: 284 case I350_I_PHY_ID: 285 phy->type = e1000_phy_82580; 286 
phy->ops.check_polarity = e1000_check_polarity_82577; 287 phy->ops.force_speed_duplex = 288 e1000_phy_force_speed_duplex_82577; 289 phy->ops.get_cable_length = e1000_get_cable_length_82577; 290 phy->ops.get_info = e1000_get_phy_info_82577; 291 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580; 292 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; 293 break; 294 case I210_I_PHY_ID: 295 phy->type = e1000_phy_i210; 296 phy->ops.check_polarity = e1000_check_polarity_m88; 297 phy->ops.get_info = e1000_get_phy_info_m88; 298 phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2; 299 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580; 300 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; 301 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; 302 break; 303 default: 304 ret_val = -E1000_ERR_PHY; 305 goto out; 306 } 307 308 out: 309 return ret_val; 310 } 311 312 /** 313 * e1000_init_nvm_params_82575 - Init NVM func ptrs. 314 * @hw: pointer to the HW structure 315 **/ 316 s32 e1000_init_nvm_params_82575(struct e1000_hw *hw) 317 { 318 struct e1000_nvm_info *nvm = &hw->nvm; 319 u32 eecd = E1000_READ_REG(hw, E1000_EECD); 320 u16 size; 321 322 DEBUGFUNC("e1000_init_nvm_params_82575"); 323 324 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> 325 E1000_EECD_SIZE_EX_SHIFT); 326 /* 327 * Added to a constant, "size" becomes the left-shift value 328 * for setting word_size. 
329 */ 330 size += NVM_WORD_SIZE_BASE_SHIFT; 331 332 /* Just in case size is out of range, cap it to the largest 333 * EEPROM size supported 334 */ 335 if (size > 15) 336 size = 15; 337 338 nvm->word_size = 1 << size; 339 if (hw->mac.type < e1000_i210) { 340 nvm->opcode_bits = 8; 341 nvm->delay_usec = 1; 342 343 switch (nvm->override) { 344 case e1000_nvm_override_spi_large: 345 nvm->page_size = 32; 346 nvm->address_bits = 16; 347 break; 348 case e1000_nvm_override_spi_small: 349 nvm->page_size = 8; 350 nvm->address_bits = 8; 351 break; 352 default: 353 nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; 354 nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 355 16 : 8; 356 break; 357 } 358 if (nvm->word_size == (1 << 15)) 359 nvm->page_size = 128; 360 361 nvm->type = e1000_nvm_eeprom_spi; 362 } else { 363 nvm->type = e1000_nvm_flash_hw; 364 } 365 366 /* Function Pointers */ 367 nvm->ops.acquire = e1000_acquire_nvm_82575; 368 nvm->ops.release = e1000_release_nvm_82575; 369 if (nvm->word_size < (1 << 15)) 370 nvm->ops.read = e1000_read_nvm_eerd; 371 else 372 nvm->ops.read = e1000_read_nvm_spi; 373 374 nvm->ops.write = e1000_write_nvm_spi; 375 nvm->ops.validate = e1000_validate_nvm_checksum_generic; 376 nvm->ops.update = e1000_update_nvm_checksum_generic; 377 nvm->ops.valid_led_default = e1000_valid_led_default_82575; 378 379 /* override generic family function pointers for specific descendants */ 380 switch (hw->mac.type) { 381 case e1000_82580: 382 nvm->ops.validate = e1000_validate_nvm_checksum_82580; 383 nvm->ops.update = e1000_update_nvm_checksum_82580; 384 break; 385 case e1000_i350: 386 case e1000_i354: 387 nvm->ops.validate = e1000_validate_nvm_checksum_i350; 388 nvm->ops.update = e1000_update_nvm_checksum_i350; 389 break; 390 default: 391 break; 392 } 393 394 return E1000_SUCCESS; 395 } 396 397 /** 398 * e1000_init_mac_params_82575 - Init MAC func ptrs. 
399 * @hw: pointer to the HW structure 400 **/ 401 static s32 e1000_init_mac_params_82575(struct e1000_hw *hw) 402 { 403 struct e1000_mac_info *mac = &hw->mac; 404 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; 405 406 DEBUGFUNC("e1000_init_mac_params_82575"); 407 408 /* Derives media type */ 409 e1000_get_media_type_82575(hw); 410 /* Set mta register count */ 411 mac->mta_reg_count = 128; 412 /* Set uta register count */ 413 mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128; 414 /* Set rar entry count */ 415 mac->rar_entry_count = E1000_RAR_ENTRIES_82575; 416 if (mac->type == e1000_82576) 417 mac->rar_entry_count = E1000_RAR_ENTRIES_82576; 418 if (mac->type == e1000_82580) 419 mac->rar_entry_count = E1000_RAR_ENTRIES_82580; 420 if (mac->type == e1000_i350 || mac->type == e1000_i354) 421 mac->rar_entry_count = E1000_RAR_ENTRIES_I350; 422 423 /* Enable EEE default settings for EEE supported devices */ 424 if (mac->type >= e1000_i350) 425 dev_spec->eee_disable = FALSE; 426 427 /* Allow a single clear of the SW semaphore on I210 and newer */ 428 if (mac->type >= e1000_i210) 429 dev_spec->clear_semaphore_once = TRUE; 430 431 /* Set if part includes ASF firmware */ 432 mac->asf_firmware_present = TRUE; 433 /* FWSM register */ 434 mac->has_fwsm = TRUE; 435 /* ARC supported; valid only if manageability features are enabled. 
*/ 436 mac->arc_subsystem_valid = 437 !!(E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK); 438 439 /* Function pointers */ 440 441 /* bus type/speed/width */ 442 mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic; 443 /* reset */ 444 if (mac->type >= e1000_82580) 445 mac->ops.reset_hw = e1000_reset_hw_82580; 446 else 447 mac->ops.reset_hw = e1000_reset_hw_82575; 448 /* hw initialization */ 449 mac->ops.init_hw = e1000_init_hw_82575; 450 /* link setup */ 451 mac->ops.setup_link = e1000_setup_link_generic; 452 /* physical interface link setup */ 453 mac->ops.setup_physical_interface = 454 (hw->phy.media_type == e1000_media_type_copper) 455 ? e1000_setup_copper_link_82575 : e1000_setup_serdes_link_82575; 456 /* physical interface shutdown */ 457 mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575; 458 /* physical interface power up */ 459 mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575; 460 /* check for link */ 461 mac->ops.check_for_link = e1000_check_for_link_82575; 462 /* read mac address */ 463 mac->ops.read_mac_addr = e1000_read_mac_addr_82575; 464 /* configure collision distance */ 465 mac->ops.config_collision_dist = e1000_config_collision_dist_82575; 466 /* multicast address update */ 467 mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; 468 if (hw->mac.type == e1000_i350 || mac->type == e1000_i354) { 469 /* writing VFTA */ 470 mac->ops.write_vfta = e1000_write_vfta_i350; 471 /* clearing VFTA */ 472 mac->ops.clear_vfta = e1000_clear_vfta_i350; 473 } else { 474 /* writing VFTA */ 475 mac->ops.write_vfta = e1000_write_vfta_generic; 476 /* clearing VFTA */ 477 mac->ops.clear_vfta = e1000_clear_vfta_generic; 478 } 479 if (hw->mac.type >= e1000_82580) 480 mac->ops.validate_mdi_setting = 481 e1000_validate_mdi_setting_crossover_generic; 482 /* ID LED init */ 483 mac->ops.id_led_init = e1000_id_led_init_generic; 484 /* blink LED */ 485 mac->ops.blink_led = e1000_blink_led_generic; 486 /* setup LED */ 487 
mac->ops.setup_led = e1000_setup_led_generic; 488 /* cleanup LED */ 489 mac->ops.cleanup_led = e1000_cleanup_led_generic; 490 /* turn on/off LED */ 491 mac->ops.led_on = e1000_led_on_generic; 492 mac->ops.led_off = e1000_led_off_generic; 493 /* clear hardware counters */ 494 mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575; 495 /* link info */ 496 mac->ops.get_link_up_info = e1000_get_link_up_info_82575; 497 /* acquire SW_FW sync */ 498 mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_82575; 499 mac->ops.release_swfw_sync = e1000_release_swfw_sync_82575; 500 if (mac->type >= e1000_i210) { 501 mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_i210; 502 mac->ops.release_swfw_sync = e1000_release_swfw_sync_i210; 503 } 504 505 /* set lan id for port to determine which phy lock to use */ 506 hw->mac.ops.set_lan_id(hw); 507 508 return E1000_SUCCESS; 509 } 510 511 /** 512 * e1000_init_function_pointers_82575 - Init func ptrs. 513 * @hw: pointer to the HW structure 514 * 515 * Called to initialize all function pointers and parameters. 516 **/ 517 void e1000_init_function_pointers_82575(struct e1000_hw *hw) 518 { 519 DEBUGFUNC("e1000_init_function_pointers_82575"); 520 521 hw->mac.ops.init_params = e1000_init_mac_params_82575; 522 hw->nvm.ops.init_params = e1000_init_nvm_params_82575; 523 hw->phy.ops.init_params = e1000_init_phy_params_82575; 524 hw->mbx.ops.init_params = e1000_init_mbx_params_pf; 525 } 526 527 /** 528 * e1000_acquire_phy_82575 - Acquire rights to access PHY 529 * @hw: pointer to the HW structure 530 * 531 * Acquire access rights to the correct PHY. 
532 **/ 533 static s32 e1000_acquire_phy_82575(struct e1000_hw *hw) 534 { 535 u16 mask = E1000_SWFW_PHY0_SM; 536 537 DEBUGFUNC("e1000_acquire_phy_82575"); 538 539 if (hw->bus.func == E1000_FUNC_1) 540 mask = E1000_SWFW_PHY1_SM; 541 else if (hw->bus.func == E1000_FUNC_2) 542 mask = E1000_SWFW_PHY2_SM; 543 else if (hw->bus.func == E1000_FUNC_3) 544 mask = E1000_SWFW_PHY3_SM; 545 546 return hw->mac.ops.acquire_swfw_sync(hw, mask); 547 } 548 549 /** 550 * e1000_release_phy_82575 - Release rights to access PHY 551 * @hw: pointer to the HW structure 552 * 553 * A wrapper to release access rights to the correct PHY. 554 **/ 555 static void e1000_release_phy_82575(struct e1000_hw *hw) 556 { 557 u16 mask = E1000_SWFW_PHY0_SM; 558 559 DEBUGFUNC("e1000_release_phy_82575"); 560 561 if (hw->bus.func == E1000_FUNC_1) 562 mask = E1000_SWFW_PHY1_SM; 563 else if (hw->bus.func == E1000_FUNC_2) 564 mask = E1000_SWFW_PHY2_SM; 565 else if (hw->bus.func == E1000_FUNC_3) 566 mask = E1000_SWFW_PHY3_SM; 567 568 hw->mac.ops.release_swfw_sync(hw, mask); 569 } 570 571 /** 572 * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii 573 * @hw: pointer to the HW structure 574 * @offset: register offset to be read 575 * @data: pointer to the read data 576 * 577 * Reads the PHY register at offset using the serial gigabit media independent 578 * interface and stores the retrieved information in data. 
579 **/ 580 static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, 581 u16 *data) 582 { 583 s32 ret_val = -E1000_ERR_PARAM; 584 585 DEBUGFUNC("e1000_read_phy_reg_sgmii_82575"); 586 587 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { 588 DEBUGOUT1("PHY Address %u is out of range\n", offset); 589 goto out; 590 } 591 592 ret_val = hw->phy.ops.acquire(hw); 593 if (ret_val) 594 goto out; 595 596 ret_val = e1000_read_phy_reg_i2c(hw, offset, data); 597 598 hw->phy.ops.release(hw); 599 600 out: 601 return ret_val; 602 } 603 604 /** 605 * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii 606 * @hw: pointer to the HW structure 607 * @offset: register offset to write to 608 * @data: data to write at register offset 609 * 610 * Writes the data to PHY register at the offset using the serial gigabit 611 * media independent interface. 612 **/ 613 static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, 614 u16 data) 615 { 616 s32 ret_val = -E1000_ERR_PARAM; 617 618 DEBUGFUNC("e1000_write_phy_reg_sgmii_82575"); 619 620 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { 621 DEBUGOUT1("PHY Address %d is out of range\n", offset); 622 goto out; 623 } 624 625 ret_val = hw->phy.ops.acquire(hw); 626 if (ret_val) 627 goto out; 628 629 ret_val = e1000_write_phy_reg_i2c(hw, offset, data); 630 631 hw->phy.ops.release(hw); 632 633 out: 634 return ret_val; 635 } 636 637 /** 638 * e1000_get_phy_id_82575 - Retrieve PHY addr and id 639 * @hw: pointer to the HW structure 640 * 641 * Retrieves the PHY address and ID for both PHY's which do and do not use 642 * sgmi interface. 643 **/ 644 static s32 e1000_get_phy_id_82575(struct e1000_hw *hw) 645 { 646 struct e1000_phy_info *phy = &hw->phy; 647 s32 ret_val = E1000_SUCCESS; 648 u16 phy_id; 649 u32 ctrl_ext; 650 u32 mdic; 651 652 DEBUGFUNC("e1000_get_phy_id_82575"); 653 654 /* 655 * For SGMII PHYs, we try the list of possible addresses until 656 * we find one that works. For non-SGMII PHYs 657 * (e.g. 
integrated copper PHYs), an address of 1 should 658 * work. The result of this function should mean phy->phy_addr 659 * and phy->id are set correctly. 660 */ 661 if (!e1000_sgmii_active_82575(hw)) { 662 phy->addr = 1; 663 ret_val = e1000_get_phy_id(hw); 664 goto out; 665 } 666 667 if (e1000_sgmii_uses_mdio_82575(hw)) { 668 switch (hw->mac.type) { 669 case e1000_82575: 670 case e1000_82576: 671 mdic = E1000_READ_REG(hw, E1000_MDIC); 672 mdic &= E1000_MDIC_PHY_MASK; 673 phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; 674 break; 675 case e1000_82580: 676 case e1000_i350: 677 case e1000_i354: 678 case e1000_i210: 679 case e1000_i211: 680 mdic = E1000_READ_REG(hw, E1000_MDICNFG); 681 mdic &= E1000_MDICNFG_PHY_MASK; 682 phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; 683 break; 684 default: 685 ret_val = -E1000_ERR_PHY; 686 goto out; 687 break; 688 } 689 ret_val = e1000_get_phy_id(hw); 690 goto out; 691 } 692 693 /* Power on sgmii phy if it is disabled */ 694 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 695 E1000_WRITE_REG(hw, E1000_CTRL_EXT, 696 ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); 697 E1000_WRITE_FLUSH(hw); 698 msec_delay(300); 699 700 /* 701 * The address field in the I2CCMD register is 3 bits and 0 is invalid. 702 * Therefore, we need to test 1-7 703 */ 704 for (phy->addr = 1; phy->addr < 8; phy->addr++) { 705 ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); 706 if (ret_val == E1000_SUCCESS) { 707 DEBUGOUT2("Vendor ID 0x%08X read at address %u\n", 708 phy_id, phy->addr); 709 /* 710 * At the time of this writing, The M88 part is 711 * the only supported SGMII PHY product. 712 */ 713 if (phy_id == M88_VENDOR) 714 break; 715 } else { 716 DEBUGOUT1("PHY address %u was unreadable\n", 717 phy->addr); 718 } 719 } 720 721 /* A valid PHY type couldn't be found. 
*/ 722 if (phy->addr == 8) { 723 phy->addr = 0; 724 ret_val = -E1000_ERR_PHY; 725 } else { 726 ret_val = e1000_get_phy_id(hw); 727 } 728 729 /* restore previous sfp cage power state */ 730 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); 731 732 out: 733 return ret_val; 734 } 735 736 /** 737 * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset 738 * @hw: pointer to the HW structure 739 * 740 * Resets the PHY using the serial gigabit media independent interface. 741 **/ 742 static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) 743 { 744 s32 ret_val = E1000_SUCCESS; 745 746 DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575"); 747 748 /* 749 * This isn't a TRUE "hard" reset, but is the only reset 750 * available to us at this time. 751 */ 752 753 DEBUGOUT("Soft resetting SGMII attached PHY...\n"); 754 755 if (!(hw->phy.ops.write_reg)) 756 goto out; 757 758 /* 759 * SFP documentation requires the following to configure the SPF module 760 * to work on SGMII. No further documentation is given. 761 */ 762 ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); 763 if (ret_val) 764 goto out; 765 766 ret_val = hw->phy.ops.commit(hw); 767 768 out: 769 return ret_val; 770 } 771 772 /** 773 * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state 774 * @hw: pointer to the HW structure 775 * @active: TRUE to enable LPLU, FALSE to disable 776 * 777 * Sets the LPLU D0 state according to the active flag. When 778 * activating LPLU this function also disables smart speed 779 * and vice versa. LPLU will not be activated unless the 780 * device autonegotiation advertisement meets standards of 781 * either 10 or 10/100 or 10/100/1000 at all duplexes. 782 * This is a function pointer entry point only called by 783 * PHY setup routines. 
784 **/ 785 static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) 786 { 787 struct e1000_phy_info *phy = &hw->phy; 788 s32 ret_val = E1000_SUCCESS; 789 u16 data; 790 791 DEBUGFUNC("e1000_set_d0_lplu_state_82575"); 792 793 if (!(hw->phy.ops.read_reg)) 794 goto out; 795 796 ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); 797 if (ret_val) 798 goto out; 799 800 if (active) { 801 data |= IGP02E1000_PM_D0_LPLU; 802 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, 803 data); 804 if (ret_val) 805 goto out; 806 807 /* When LPLU is enabled, we should disable SmartSpeed */ 808 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 809 &data); 810 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 811 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 812 data); 813 if (ret_val) 814 goto out; 815 } else { 816 data &= ~IGP02E1000_PM_D0_LPLU; 817 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, 818 data); 819 /* 820 * LPLU and SmartSpeed are mutually exclusive. LPLU is used 821 * during Dx states where the power conservation is most 822 * important. During driver activity we should enable 823 * SmartSpeed, so performance is maintained. 
824 */ 825 if (phy->smart_speed == e1000_smart_speed_on) { 826 ret_val = phy->ops.read_reg(hw, 827 IGP01E1000_PHY_PORT_CONFIG, 828 &data); 829 if (ret_val) 830 goto out; 831 832 data |= IGP01E1000_PSCFR_SMART_SPEED; 833 ret_val = phy->ops.write_reg(hw, 834 IGP01E1000_PHY_PORT_CONFIG, 835 data); 836 if (ret_val) 837 goto out; 838 } else if (phy->smart_speed == e1000_smart_speed_off) { 839 ret_val = phy->ops.read_reg(hw, 840 IGP01E1000_PHY_PORT_CONFIG, 841 &data); 842 if (ret_val) 843 goto out; 844 845 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 846 ret_val = phy->ops.write_reg(hw, 847 IGP01E1000_PHY_PORT_CONFIG, 848 data); 849 if (ret_val) 850 goto out; 851 } 852 } 853 854 out: 855 return ret_val; 856 } 857 858 /** 859 * e1000_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state 860 * @hw: pointer to the HW structure 861 * @active: TRUE to enable LPLU, FALSE to disable 862 * 863 * Sets the LPLU D0 state according to the active flag. When 864 * activating LPLU this function also disables smart speed 865 * and vice versa. LPLU will not be activated unless the 866 * device autonegotiation advertisement meets standards of 867 * either 10 or 10/100 or 10/100/1000 at all duplexes. 868 * This is a function pointer entry point only called by 869 * PHY setup routines. 870 **/ 871 static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) 872 { 873 struct e1000_phy_info *phy = &hw->phy; 874 s32 ret_val = E1000_SUCCESS; 875 u32 data; 876 877 DEBUGFUNC("e1000_set_d0_lplu_state_82580"); 878 879 data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); 880 881 if (active) { 882 data |= E1000_82580_PM_D0_LPLU; 883 884 /* When LPLU is enabled, we should disable SmartSpeed */ 885 data &= ~E1000_82580_PM_SPD; 886 } else { 887 data &= ~E1000_82580_PM_D0_LPLU; 888 889 /* 890 * LPLU and SmartSpeed are mutually exclusive. LPLU is used 891 * during Dx states where the power conservation is most 892 * important. 
During driver activity we should enable 893 * SmartSpeed, so performance is maintained. 894 */ 895 if (phy->smart_speed == e1000_smart_speed_on) 896 data |= E1000_82580_PM_SPD; 897 else if (phy->smart_speed == e1000_smart_speed_off) 898 data &= ~E1000_82580_PM_SPD; 899 } 900 901 E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data); 902 return ret_val; 903 } 904 905 /** 906 * e1000_set_d3_lplu_state_82580 - Sets low power link up state for D3 907 * @hw: pointer to the HW structure 908 * @active: boolean used to enable/disable lplu 909 * 910 * Success returns 0, Failure returns 1 911 * 912 * The low power link up (lplu) state is set to the power management level D3 913 * and SmartSpeed is disabled when active is TRUE, else clear lplu for D3 914 * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU 915 * is used during Dx states where the power conservation is most important. 916 * During driver activity, SmartSpeed should be enabled so performance is 917 * maintained. 918 **/ 919 s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) 920 { 921 struct e1000_phy_info *phy = &hw->phy; 922 s32 ret_val = E1000_SUCCESS; 923 u32 data; 924 925 DEBUGFUNC("e1000_set_d3_lplu_state_82580"); 926 927 data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); 928 929 if (!active) { 930 data &= ~E1000_82580_PM_D3_LPLU; 931 /* 932 * LPLU and SmartSpeed are mutually exclusive. LPLU is used 933 * during Dx states where the power conservation is most 934 * important. During driver activity we should enable 935 * SmartSpeed, so performance is maintained. 
 */
		if (phy->smart_speed == e1000_smart_speed_on)
			data |= E1000_82580_PM_SPD;
		else if (phy->smart_speed == e1000_smart_speed_off)
			data &= ~E1000_82580_PM_SPD;
	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
		data |= E1000_82580_PM_D3_LPLU;
		/* When LPLU is enabled, we should disable SmartSpeed */
		data &= ~E1000_82580_PM_SPD;
	}

	E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
	return ret_val;
}

/**
 *  e1000_acquire_nvm_82575 - Request for access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Acquire the necessary semaphores for exclusive access to the EEPROM.
 *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
 *  Return successful if access grant bit set, else clear the request for
 *  EEPROM access and return -E1000_ERR_NVM (-1).
 **/
static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("e1000_acquire_nvm_82575");

	/* Take the SW/FW semaphore for the EEPROM first */
	ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
	if (ret_val)
		goto out;

	/*
	 * Check if there is some access
	 * error this access may hook on
	 */
	if (hw->mac.type == e1000_i350) {
		u32 eecd = E1000_READ_REG(hw, E1000_EECD);
		if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT |
		    E1000_EECD_TIMEOUT)) {
			/* Clear all access error flags */
			E1000_WRITE_REG(hw, E1000_EECD, eecd |
					E1000_EECD_ERROR_CLR);
			DEBUGOUT("Nvm bit banging access error detected and cleared.\n");
		}
	}
	if (hw->mac.type == e1000_82580) {
		u32 eecd = E1000_READ_REG(hw, E1000_EECD);
		if (eecd & E1000_EECD_BLOCKED) {
			/* Clear access error flag */
			E1000_WRITE_REG(hw, E1000_EECD, eecd |
					E1000_EECD_BLOCKED);
			DEBUGOUT("Nvm bit banging access error detected and cleared.\n");
		}
	}


	/* On failure, drop the SW/FW semaphore so others can use the NVM */
	ret_val = e1000_acquire_nvm_generic(hw);
	if (ret_val)
		e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);

out:
	return ret_val;
}

/**
 *  e1000_release_nvm_82575 - Release exclusive access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
 *  then release the semaphores acquired.
 **/
static void e1000_release_nvm_82575(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_release_nvm_82575");

	/* Release in the reverse order of acquisition */
	e1000_release_nvm_generic(hw);

	e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
}

/**
 *  e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to acquire
 *
 *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 *  will also specify which port we're acquiring the lock for.
 **/
static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	/* FW ownership bits live in the upper 16 bits of SW_FW_SYNC */
	u32 fwmask = mask << 16;
	s32 ret_val = E1000_SUCCESS;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	DEBUGFUNC("e1000_acquire_swfw_sync_82575");

	while (i < timeout) {
		/* The HW semaphore guards access to SW_FW_SYNC itself */
		if (e1000_get_hw_semaphore_generic(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/*
		 * Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		e1000_put_hw_semaphore_generic(hw);
		msec_delay_irq(5);
		i++;
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	/* Claim the resource, then drop the HW semaphore; the swmask bit
	 * in SW_FW_SYNC stays set until release_swfw_sync clears it. */
	swfw_sync |= swmask;
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

	e1000_put_hw_semaphore_generic(hw);

out:
	return ret_val;
}

/**
 *  e1000_release_swfw_sync_82575 - Release SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to acquire
 *
 *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 *  will also specify which port we're releasing the lock for.
 **/
static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	DEBUGFUNC("e1000_release_swfw_sync_82575");

	/* Spin until the HW semaphore is ours; release cannot fail */
	while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS)
		; /* Empty */

	swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

	e1000_put_hw_semaphore_generic(hw);
}

/**
 *  e1000_get_cfg_done_82575 - Read config done bit
 *  @hw: pointer to the HW structure
 *
 *  Read the management control register for the config done bit for
 *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
 *  to read the config done bit, so an error is *ONLY* logged and returns
 *  E1000_SUCCESS.  If we were to return with error, EEPROM-less silicon
 *  would not be able to be reset or change link.
 **/
static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
{
	s32 timeout = PHY_CFG_TIMEOUT;
	s32 ret_val = E1000_SUCCESS;
	u32 mask = E1000_NVM_CFG_DONE_PORT_0;

	DEBUGFUNC("e1000_get_cfg_done_82575");

	/* Each PCI function polls its own config-done bit in EEMNGCTL */
	if (hw->bus.func == E1000_FUNC_1)
		mask = E1000_NVM_CFG_DONE_PORT_1;
	else if (hw->bus.func == E1000_FUNC_2)
		mask = E1000_NVM_CFG_DONE_PORT_2;
	else if (hw->bus.func == E1000_FUNC_3)
		mask = E1000_NVM_CFG_DONE_PORT_3;
	while (timeout) {
		if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
			break;
		msec_delay(1);
		timeout--;
	}
	/* Timeout is only logged; see function header for why we still
	 * return E1000_SUCCESS on EEPROM-less silicon. */
	if (!timeout)
		DEBUGOUT("MNG configuration cycle has not completed.\n");

	/* If EEPROM is not marked present, init the PHY manually */
	if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
	    (hw->phy.type == e1000_phy_igp_3))
		e1000_phy_init_script_igp3(hw);

	return ret_val;
}

/**
 *  e1000_get_link_up_info_82575 - Get link speed/duplex info
 *  @hw: pointer to the HW structure
 *  @speed: stores the current speed
 *  @duplex: stores the current duplex
 *
 *  This is a wrapper function, if using the serial gigabit media independent
 *  interface, use PCS to retrieve the link speed and duplex information.
 *  Otherwise, use the generic function to get the link speed and duplex info.
 **/
static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
					u16 *duplex)
{
	s32 ret_val;

	DEBUGFUNC("e1000_get_link_up_info_82575");

	/* Non-copper (serdes/sgmii fiber) media must use the PCS registers */
	if (hw->phy.media_type != e1000_media_type_copper)
		ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed,
							       duplex);
	else
		ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
								    duplex);

	return ret_val;
}

/**
 *  e1000_check_for_link_82575 - Check for link
 *  @hw: pointer to the HW structure
 *
 *  If sgmii is enabled, then use the pcs register to determine link, otherwise
 *  use the generic interface for determining link.
 **/
static s32 e1000_check_for_link_82575(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 speed, duplex;

	DEBUGFUNC("e1000_check_for_link_82575");

	if (hw->phy.media_type != e1000_media_type_copper) {
		/* NOTE(review): this call's return value is overwritten by
		 * the flow-control configuration below; it is used here only
		 * for its side effect of updating mac.serdes_has_link. */
		ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed,
							       &duplex);
		/*
		 * Use this flag to determine if link needs to be checked or
		 * not.  If we have link clear the flag so that we do not
		 * continue to check for link.
		 */
		hw->mac.get_link_status = !hw->mac.serdes_has_link;

		/*
		 * Configure Flow Control now that Auto-Neg has completed.
		 * First, we need to restore the desired flow control
		 * settings because we may have had to re-autoneg with a
		 * different link partner.
		 */
		ret_val = e1000_config_fc_after_link_up_generic(hw);
		if (ret_val)
			DEBUGOUT("Error configuring flow control\n");
	} else {
		ret_val = e1000_check_for_copper_link_generic(hw);
	}

	return ret_val;
}

/**
 *  e1000_check_for_link_media_swap - Check which M88E1112 interface linked
 *  @hw: pointer to the HW structure
 *
 *  Poll the M88E1112 interfaces to see which interface achieved link.
 */
static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;
	u8 port = 0;

	DEBUGFUNC("e1000_check_for_link_media_swap");

	/* Check the copper medium. */
	/* M88E1112 status is paged: page 0 = copper, page 1 = fiber */
	ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
	if (ret_val)
		return ret_val;

	ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
	if (ret_val)
		return ret_val;

	if (data & E1000_M88E1112_STATUS_LINK)
		port = E1000_MEDIA_PORT_COPPER;

	/* Check the other medium. */
	ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1);
	if (ret_val)
		return ret_val;

	ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
	if (ret_val)
		return ret_val;

	/* If both interfaces have link, the "other" port wins here */
	if (data & E1000_M88E1112_STATUS_LINK)
		port = E1000_MEDIA_PORT_OTHER;

	/* Determine if a swap needs to happen. */
	if (port && (hw->dev_spec._82575.media_port != port)) {
		hw->dev_spec._82575.media_port = port;
		hw->dev_spec._82575.media_changed = TRUE;
	} else {
		/* NOTE(review): ret_val from this call is discarded — the
		 * function always returns E1000_SUCCESS below.  Looks
		 * intentional (link state is tracked via hw->mac flags),
		 * but verify against callers before changing. */
		ret_val = e1000_check_for_link_82575(hw);
	}

	return E1000_SUCCESS;
}

/**
 *  e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown
 *  @hw: pointer to the HW structure
 **/
static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw)
{
	u32 reg;

	DEBUGFUNC("e1000_power_up_serdes_link_82575");

	/* Only applicable to serdes/sgmii media */
	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !e1000_sgmii_active_82575(hw))
		return;

	/* Enable PCS to turn on link */
	reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
	reg |= E1000_PCS_CFG_PCS_EN;
	E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);

	/* Power up the laser */
	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	reg &= ~E1000_CTRL_EXT_SDP3_DATA;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);

	/* flush the write to verify completion */
	E1000_WRITE_FLUSH(hw);
	msec_delay(1);
}

/**
 *  e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
 *  @hw: pointer to the HW structure
 *  @speed: stores the current speed
 *  @duplex: stores the current duplex
 *
 *  Using the physical coding sub-layer (PCS), retrieve the current speed and
 *  duplex, then store the values in the pointers provided.
 **/
static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
						u16 *speed, u16 *duplex)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 pcs;

	DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575");

	/*
	 * Read the PCS Status register for link state.  For non-copper mode,
	 * the status register is not accurate.  The PCS status register is
	 * used instead.
	 */
	pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT);

	/*
	 * The link up bit determines when link is up on autoneg.
	 */
	if (pcs & E1000_PCS_LSTS_LINK_OK) {
		mac->serdes_has_link = TRUE;

		/* Detect and store PCS speed */
		if (pcs & E1000_PCS_LSTS_SPEED_1000)
			*speed = SPEED_1000;
		else if (pcs & E1000_PCS_LSTS_SPEED_100)
			*speed = SPEED_100;
		else
			*speed = SPEED_10;

		/* Detect and store PCS duplex */
		if (pcs & E1000_PCS_LSTS_DUPLEX_FULL)
			*duplex = FULL_DUPLEX;
		else
			*duplex = HALF_DUPLEX;

	} else {
		/* No link: report zero speed/duplex rather than stale data */
		mac->serdes_has_link = FALSE;
		*speed = 0;
		*duplex = 0;
	}

	return E1000_SUCCESS;
}

/**
 *  e1000_shutdown_serdes_link_82575 - Remove link during power down
 *  @hw: pointer to the HW structure
 *
 *  In the case of serdes shut down sfp and PCS on driver unload
 *  when management pass thru is not enabled.
 **/
void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw)
{
	u32 reg;

	DEBUGFUNC("e1000_shutdown_serdes_link_82575");

	/* Only applicable to serdes/sgmii media */
	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !e1000_sgmii_active_82575(hw))
		return;

	/* Leave the link up if management (BMC) traffic needs it */
	if (!e1000_enable_mng_pass_thru(hw)) {
		/* Disable PCS to turn off link */
		reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
		reg &= ~E1000_PCS_CFG_PCS_EN;
		E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);

		/* shutdown the laser */
		reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		reg |= E1000_CTRL_EXT_SDP3_DATA;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);

		/* flush the write to verify completion */
		E1000_WRITE_FLUSH(hw);
		msec_delay(1);
	}

	return;
}

/**
 *  e1000_reset_hw_82575 - Reset hardware
 *  @hw: pointer to the HW structure
 *
 *  This resets the hardware into a known state.
 **/
static s32 e1000_reset_hw_82575(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	DEBUGFUNC("e1000_reset_hw_82575");

	/*
	 * Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = e1000_disable_pcie_master_generic(hw);
	if (ret_val)
		DEBUGOUT("PCI-E Master disable polling has failed.\n");

	/* set the completion timeout for interface */
	ret_val = e1000_set_pcie_completion_timeout(hw);
	if (ret_val)
		DEBUGOUT("PCI-E Set completion timeout has failed.\n");

	DEBUGOUT("Masking off all interrupts\n");
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);

	/* Quiesce RX/TX before issuing the global reset */
	E1000_WRITE_REG(hw, E1000_RCTL, 0);
	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
	E1000_WRITE_FLUSH(hw);

	msec_delay(10);

	ctrl = E1000_READ_REG(hw, E1000_CTRL);

	DEBUGOUT("Issuing a global reset to MAC\n");
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);

	ret_val = e1000_get_auto_rd_done_generic(hw);
	if (ret_val) {
		/*
		 * When auto config read does not complete, do not
		 * return with an error.  This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		DEBUGOUT("Auto Read Done did not complete\n");
	}

	/* If EEPROM is not present, run manual init scripts */
	if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES))
		e1000_reset_init_script_82575(hw);

	/* Clear any pending interrupt events. */
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
	E1000_READ_REG(hw, E1000_ICR);

	/* Install any alternate MAC address into RAR0 */
	ret_val = e1000_check_alt_mac_addr_generic(hw);

	return ret_val;
}

/**
 *  e1000_init_hw_82575 - Initialize hardware
 *  @hw: pointer to the HW structure
 *
 *  This inits the hardware readying it for operation.
 **/
static s32 e1000_init_hw_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	u16 i, rar_count = mac->rar_entry_count;

	DEBUGFUNC("e1000_init_hw_82575");

	/* Initialize identification LED */
	ret_val = mac->ops.id_led_init(hw);
	if (ret_val) {
		DEBUGOUT("Error initializing identification LED\n");
		/* This is not fatal and we should not stop init due to this */
	}

	/* Disabling VLAN filtering */
	DEBUGOUT("Initializing the IEEE VLAN\n");
	mac->ops.clear_vfta(hw);

	/* Setup the receive address */
	e1000_init_rx_addrs_generic(hw, rar_count);

	/* Zero out the Multicast HASH table */
	DEBUGOUT("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);

	/* Zero out the Unicast HASH table */
	DEBUGOUT("Zeroing the UTA\n");
	for (i = 0; i < mac->uta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0);

	/* Setup link and flow control */
	ret_val = mac->ops.setup_link(hw);

	/* Set the default MTU size */
	hw->dev_spec._82575.mtu = 1500;

	/*
	 * Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	e1000_clear_hw_cntrs_82575(hw);

	/* ret_val carries the setup_link result; LED init failure above
	 * is deliberately not propagated */
	return ret_val;
}

/**
 *  e1000_setup_copper_link_82575 - Configure copper link settings
 *  @hw: pointer to the HW structure
 *
 *  Configures the link for auto-neg or forced speed and duplex.  Then we check
 *  for link, once link is established calls to configure collision distance
 *  and flow control are called.
 **/
static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;
	u32 phpm_reg;

	DEBUGFUNC("e1000_setup_copper_link_82575");

	/* Set link up and let HW (not forced values) drive speed/duplex */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);

	/* Clear Go Link Disconnect bit */
	if (hw->mac.type >= e1000_82580) {
		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
	}

	/* SGMII copper also requires serdes-side configuration first */
	ret_val = e1000_setup_serdes_link_82575(hw);
	if (ret_val)
		goto out;

	if (e1000_sgmii_active_82575(hw)) {
		/* allow time for SFP cage time to power up phy */
		msec_delay(300);

		ret_val = hw->phy.ops.reset(hw);
		if (ret_val) {
			DEBUGOUT("Error resetting the PHY.\n");
			goto out;
		}
	}
	/* Dispatch the PHY-specific copper setup routine */
	switch (hw->phy.type) {
	case e1000_phy_i210:
	case e1000_phy_m88:
		switch (hw->phy.id) {
		case I347AT4_E_PHY_ID:
		case M88E1112_E_PHY_ID:
		case M88E1340M_E_PHY_ID:
		case M88E1545_E_PHY_ID:
		case I210_I_PHY_ID:
			ret_val = e1000_copper_link_setup_m88_gen2(hw);
			break;
		default:
			ret_val = e1000_copper_link_setup_m88(hw);
			break;
		}
		break;
	case e1000_phy_igp_3:
		ret_val = e1000_copper_link_setup_igp(hw);
		break;
	case e1000_phy_82580:
		ret_val = e1000_copper_link_setup_82577(hw);
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

	if (ret_val)
		goto out;

	ret_val = e1000_setup_copper_link_generic(hw);
out:
	return ret_val;
}

/**
 *  e1000_setup_serdes_link_82575 - Setup link for serdes
 *  @hw: pointer to the HW structure
 *
 *  Configure the physical coding sub-layer (PCS) link.
The PCS link is
 *  used on copper connections where the serialized gigabit media independent
 *  interface (sgmii), or serdes fiber is being used.  Configures the link
 *  for auto-negotiation or forces speed/duplex.
 **/
static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
{
	u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
	bool pcs_autoneg;
	s32 ret_val = E1000_SUCCESS;
	u16 data;

	DEBUGFUNC("e1000_setup_serdes_link_82575");

	/* Nothing to do for plain copper links */
	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !e1000_sgmii_active_82575(hw))
		return ret_val;

	/*
	 * On the 82575, SerDes loopback mode persists until it is
	 * explicitly turned off or a power cycle is performed.  A read to
	 * the register does not indicate its status.  Therefore, we ensure
	 * loopback mode is disabled during initialization.
	 */
	E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);

	/* power on the sfp cage if present */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);

	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
	ctrl_reg |= E1000_CTRL_SLU;

	/* set both sw defined pins on 82575/82576*/
	if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576)
		ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;

	reg = E1000_READ_REG(hw, E1000_PCS_LCTL);

	/* default pcs_autoneg to the same setting as mac autoneg */
	pcs_autoneg = hw->mac.autoneg;

	switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
	case E1000_CTRL_EXT_LINK_MODE_SGMII:
		/* sgmii mode lets the phy handle forcing speed/duplex */
		pcs_autoneg = TRUE;
		/* autoneg time out should be disabled for SGMII mode */
		reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
		break;
	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
		/* disable PCS autoneg and support parallel detect only */
		pcs_autoneg = FALSE;
		/* fall through to default case */
	default:
		if (hw->mac.type == e1000_82575 ||
		    hw->mac.type == e1000_82576) {
			/* NVM may override and disable PCS autoneg */
			ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
			if (ret_val) {
				DEBUGOUT("NVM Read Error\n");
				return ret_val;
			}

			if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT)
				pcs_autoneg = FALSE;
		}

		/*
		 * non-SGMII modes only supports a speed of 1000/Full for the
		 * link so it is best to just force the MAC and let the pcs
		 * link either autoneg or be forced to 1000/Full
		 */
		ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
			    E1000_CTRL_FD | E1000_CTRL_FRCDPX;

		/* set speed of 1000/Full if speed/duplex is forced */
		reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
		break;
	}

	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);

	/*
	 * New SerDes mode allows for forcing speed or autonegotiating speed
	 * at 1gb.  Autoneg should be default set by most drivers.  This is
	 * the mode that will be compatible with older link partners and
	 * switches.  However, both are supported by the hardware and some
	 * drivers/tools.
	 */
	reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
		 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);

	if (pcs_autoneg) {
		/* Set PCS register for autoneg */
		reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
		       E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */

		/* Disable force flow control for autoneg */
		reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;

		/* Configure flow control advertisement for autoneg */
		anadv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV);
		anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);

		switch (hw->fc.requested_mode) {
		case e1000_fc_full:
		case e1000_fc_rx_pause:
			anadv_reg |= E1000_TXCW_ASM_DIR;
			anadv_reg |= E1000_TXCW_PAUSE;
			break;
		case e1000_fc_tx_pause:
			anadv_reg |= E1000_TXCW_ASM_DIR;
			break;
		default:
			break;
		}

		E1000_WRITE_REG(hw, E1000_PCS_ANADV, anadv_reg);

		DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
	} else {
		/* Set PCS register for forced link */
		reg |= E1000_PCS_LCTL_FSD;	/* Force Speed */

		/* Force flow control for forced link */
		reg |= E1000_PCS_LCTL_FORCE_FCTRL;

		DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
	}

	E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);

	/* In forced non-SGMII mode the MAC must mirror the FC settings */
	if (!pcs_autoneg && !e1000_sgmii_active_82575(hw))
		e1000_force_mac_fc_generic(hw);

	return ret_val;
}

/**
 *  e1000_get_media_type_82575 - derives current media type.
 *  @hw: pointer to the HW structure
 *
 *  The media type is chosen reflecting few settings.
 *  The following are taken into account:
 *  - link mode set in the current port Init Control Word #3
 *  - current link mode settings in CSR register
 *  - MDIO vs.
I2C PHY control interface chosen
 *  - SFP module media type
 **/
static s32 e1000_get_media_type_82575(struct e1000_hw *hw)
{
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
	s32 ret_val = E1000_SUCCESS;
	u32 ctrl_ext = 0;
	u32 link_mode = 0;

	/* Set internal phy as default */
	dev_spec->sgmii_active = FALSE;
	dev_spec->module_plugged = FALSE;

	/* Get CSR setting */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);

	/* extract link mode setting */
	link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK;

	switch (link_mode) {
	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
		hw->phy.media_type = e1000_media_type_internal_serdes;
		break;
	case E1000_CTRL_EXT_LINK_MODE_GMII:
		hw->phy.media_type = e1000_media_type_copper;
		break;
	case E1000_CTRL_EXT_LINK_MODE_SGMII:
		/* Get phy control interface type set (MDIO vs. I2C)*/
		if (e1000_sgmii_uses_mdio_82575(hw)) {
			hw->phy.media_type = e1000_media_type_copper;
			dev_spec->sgmii_active = TRUE;
			break;
		}
		/* fall through for I2C based SGMII */
	case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
		/* read media type from SFP EEPROM */
		ret_val = e1000_set_sfp_media_type_82575(hw);
		if ((ret_val != E1000_SUCCESS) ||
		    (hw->phy.media_type == e1000_media_type_unknown)) {
			/*
			 * If media type was not identified then return media
			 * type defined by the CTRL_EXT settings.
			 */
			hw->phy.media_type = e1000_media_type_internal_serdes;

			if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) {
				hw->phy.media_type = e1000_media_type_copper;
				dev_spec->sgmii_active = TRUE;
			}

			break;
		}

		/* do not change link mode for 100BaseFX */
		if (dev_spec->eth_flags.e100_base_fx)
			break;

		/* change current link mode setting */
		ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;

		if (hw->phy.media_type == e1000_media_type_copper)
			ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
		else
			ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;

		E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);

		break;
	}

	return ret_val;
}

/**
 *  e1000_set_sfp_media_type_82575 - derives SFP module media type.
 *  @hw: pointer to the HW structure
 *
 *  The media type is chosen based on SFP module.
 *  compatibility flags retrieved from SFP ID EEPROM.
 **/
static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw)
{
	s32 ret_val = E1000_ERR_CONFIG;
	u32 ctrl_ext = 0;
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
	struct sfp_e1000_flags *eth_flags = &dev_spec->eth_flags;
	u8 tranceiver_type = 0;
	s32 timeout = 3;

	/* Turn I2C interface ON and power on sfp cage */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA);

	E1000_WRITE_FLUSH(hw);

	/* Read SFP module data; retry while the module powers up */
	while (timeout) {
		ret_val = e1000_read_sfp_data_byte(hw,
			E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET),
			&tranceiver_type);
		if (ret_val == E1000_SUCCESS)
			break;
		msec_delay(100);
		timeout--;
	}
	if (ret_val != E1000_SUCCESS)
		goto out;

	ret_val = e1000_read_sfp_data_byte(hw,
			E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET),
			(u8 *)eth_flags);
	if (ret_val != E1000_SUCCESS)
		goto out;

	/* Check if there is some SFP module plugged and powered */
	if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
	    (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
		dev_spec->module_plugged = TRUE;
		if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
			hw->phy.media_type = e1000_media_type_internal_serdes;
		} else if (eth_flags->e100_base_fx) {
			dev_spec->sgmii_active = TRUE;
			hw->phy.media_type = e1000_media_type_internal_serdes;
		} else if (eth_flags->e1000_base_t) {
			dev_spec->sgmii_active = TRUE;
			hw->phy.media_type = e1000_media_type_copper;
		} else {
			hw->phy.media_type = e1000_media_type_unknown;
			DEBUGOUT("PHY module has not been recognized\n");
			goto out;
		}
	} else {
		hw->phy.media_type = e1000_media_type_unknown;
	}
	ret_val = E1000_SUCCESS;
out:
	/* Restore I2C interface setting */
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	return ret_val;
}

/**
 *  e1000_valid_led_default_82575 - Verify a valid default LED config
 *  @hw: pointer to the HW structure
 *  @data: pointer to the NVM (EEPROM)
 *
 *  Read the EEPROM for the current default LED configuration.  If the
 *  LED configuration is not valid, set to a valid LED configuration.
 **/
static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	DEBUGFUNC("e1000_valid_led_default_82575");

	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		goto out;
	}

	/* Substitute a sane default when the NVM word is a reserved value */
	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
		switch (hw->phy.media_type) {
		case e1000_media_type_internal_serdes:
			*data = ID_LED_DEFAULT_82575_SERDES;
			break;
		case e1000_media_type_copper:
		default:
			*data = ID_LED_DEFAULT;
			break;
		}
	}
out:
	return ret_val;
}

/**
 *  e1000_sgmii_active_82575 - Return sgmii state
 *  @hw: pointer to the HW structure
 *
 *  82575 silicon has a serialized gigabit media independent interface (sgmii)
 *  which can be enabled for use in the embedded applications.  Simply
 *  return the current state of the sgmii interface.
 **/
static bool e1000_sgmii_active_82575(struct e1000_hw *hw)
{
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
	return dev_spec->sgmii_active;
}

/**
 *  e1000_reset_init_script_82575 - Inits HW defaults after reset
 *  @hw: pointer to the HW structure
 *
 *  Inits recommended HW defaults after a reset when there is no EEPROM
 *  detected.  This is only for the 82575.
 **/
static s32 e1000_reset_init_script_82575(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_reset_init_script_82575");

	if (hw->mac.type == e1000_82575) {
		DEBUGOUT("Running reset init script for 82575\n");
		/* SerDes configuration via SERDESCTRL */
		e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C);
		e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78);
		e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23);
		e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15);

		/* CCM configuration via CCMCTL register */
		e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00);
		e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00);

		/* PCIe lanes configuration */
		e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC);
		e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF);
		e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05);
		e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81);

		/* PCIe PLL Configuration */
		e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47);
		e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00);
		e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00);
	}

	return E1000_SUCCESS;
}

/**
 *  e1000_read_mac_addr_82575 - Read device MAC address
 *  @hw: pointer to the HW structure
 **/
static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_read_mac_addr_82575");

	/*
	 * If there's an alternate MAC address place it in RAR0
	 * so that it will override the Si installed default perm
	 * address.
	 */
	ret_val = e1000_check_alt_mac_addr_generic(hw);
	if (ret_val)
		goto out;

	ret_val = e1000_read_mac_addr_generic(hw);

out:
	return ret_val;
}

/**
 *  e1000_config_collision_dist_82575 - Configure collision distance
 *  @hw: pointer to the HW structure
 *
 *  Configures the collision distance to the default value and is used
 *  during link setup.
 **/
static void e1000_config_collision_dist_82575(struct e1000_hw *hw)
{
	u32 tctl_ext;

	DEBUGFUNC("e1000_config_collision_dist_82575");

	/* 82575 keeps collision distance in TCTL_EXT, not TCTL */
	tctl_ext = E1000_READ_REG(hw, E1000_TCTL_EXT);

	tctl_ext &= ~E1000_TCTL_EXT_COLD;
	tctl_ext |= E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT;

	E1000_WRITE_REG(hw, E1000_TCTL_EXT, tctl_ext);
	E1000_WRITE_FLUSH(hw);
}

/**
 *  e1000_power_down_phy_copper_82575 - Remove link during PHY power down
 *  @hw: pointer to the HW structure
 *
 *  In the case of a PHY power down to save power, or to turn off link during a
 *  driver unload, or wake on lan is not enabled, remove the link.
 **/
static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;

	/* check_reset_block is an optional ops hook; bail if unset */
	if (!(phy->ops.check_reset_block))
		return;

	/* If the management interface is not enabled, then power down */
	if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw)))
		e1000_power_down_phy_copper(hw);

	return;
}

/**
 *  e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters
 *  @hw: pointer to the HW structure
 *
 *  Clears the hardware counters by reading the counter registers.
2021 **/ 2022 static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw) 2023 { 2024 DEBUGFUNC("e1000_clear_hw_cntrs_82575"); 2025 2026 e1000_clear_hw_cntrs_base_generic(hw); 2027 2028 E1000_READ_REG(hw, E1000_PRC64); 2029 E1000_READ_REG(hw, E1000_PRC127); 2030 E1000_READ_REG(hw, E1000_PRC255); 2031 E1000_READ_REG(hw, E1000_PRC511); 2032 E1000_READ_REG(hw, E1000_PRC1023); 2033 E1000_READ_REG(hw, E1000_PRC1522); 2034 E1000_READ_REG(hw, E1000_PTC64); 2035 E1000_READ_REG(hw, E1000_PTC127); 2036 E1000_READ_REG(hw, E1000_PTC255); 2037 E1000_READ_REG(hw, E1000_PTC511); 2038 E1000_READ_REG(hw, E1000_PTC1023); 2039 E1000_READ_REG(hw, E1000_PTC1522); 2040 2041 E1000_READ_REG(hw, E1000_ALGNERRC); 2042 E1000_READ_REG(hw, E1000_RXERRC); 2043 E1000_READ_REG(hw, E1000_TNCRS); 2044 E1000_READ_REG(hw, E1000_CEXTERR); 2045 E1000_READ_REG(hw, E1000_TSCTC); 2046 E1000_READ_REG(hw, E1000_TSCTFC); 2047 2048 E1000_READ_REG(hw, E1000_MGTPRC); 2049 E1000_READ_REG(hw, E1000_MGTPDC); 2050 E1000_READ_REG(hw, E1000_MGTPTC); 2051 2052 E1000_READ_REG(hw, E1000_IAC); 2053 E1000_READ_REG(hw, E1000_ICRXOC); 2054 2055 E1000_READ_REG(hw, E1000_ICRXPTC); 2056 E1000_READ_REG(hw, E1000_ICRXATC); 2057 E1000_READ_REG(hw, E1000_ICTXPTC); 2058 E1000_READ_REG(hw, E1000_ICTXATC); 2059 E1000_READ_REG(hw, E1000_ICTXQEC); 2060 E1000_READ_REG(hw, E1000_ICTXQMTC); 2061 E1000_READ_REG(hw, E1000_ICRXDMTC); 2062 2063 E1000_READ_REG(hw, E1000_CBTMPC); 2064 E1000_READ_REG(hw, E1000_HTDPMC); 2065 E1000_READ_REG(hw, E1000_CBRMPC); 2066 E1000_READ_REG(hw, E1000_RPTHC); 2067 E1000_READ_REG(hw, E1000_HGPTC); 2068 E1000_READ_REG(hw, E1000_HTCBDPC); 2069 E1000_READ_REG(hw, E1000_HGORCL); 2070 E1000_READ_REG(hw, E1000_HGORCH); 2071 E1000_READ_REG(hw, E1000_HGOTCL); 2072 E1000_READ_REG(hw, E1000_HGOTCH); 2073 E1000_READ_REG(hw, E1000_LENERRS); 2074 2075 /* This register should not be read in copper configurations */ 2076 if ((hw->phy.media_type == e1000_media_type_internal_serdes) || 2077 e1000_sgmii_active_82575(hw)) 2078 
E1000_READ_REG(hw, E1000_SCVPC); 2079 } 2080 2081 /** 2082 * e1000_rx_fifo_flush_82575 - Clean rx fifo after Rx enable 2083 * @hw: pointer to the HW structure 2084 * 2085 * After rx enable if managability is enabled then there is likely some 2086 * bad data at the start of the fifo and possibly in the DMA fifo. This 2087 * function clears the fifos and flushes any packets that came in as rx was 2088 * being enabled. 2089 **/ 2090 void e1000_rx_fifo_flush_82575(struct e1000_hw *hw) 2091 { 2092 u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; 2093 int i, ms_wait; 2094 2095 DEBUGFUNC("e1000_rx_fifo_workaround_82575"); 2096 if (hw->mac.type != e1000_82575 || 2097 !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN)) 2098 return; 2099 2100 /* Disable all Rx queues */ 2101 for (i = 0; i < 4; i++) { 2102 rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i)); 2103 E1000_WRITE_REG(hw, E1000_RXDCTL(i), 2104 rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); 2105 } 2106 /* Poll all queues to verify they have shut down */ 2107 for (ms_wait = 0; ms_wait < 10; ms_wait++) { 2108 msec_delay(1); 2109 rx_enabled = 0; 2110 for (i = 0; i < 4; i++) 2111 rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i)); 2112 if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE)) 2113 break; 2114 } 2115 2116 if (ms_wait == 10) 2117 DEBUGOUT("Queue disable timed out after 10ms\n"); 2118 2119 /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all 2120 * incoming packets are rejected. 
Set enable and wait 2ms so that 2121 * any packet that was coming in as RCTL.EN was set is flushed 2122 */ 2123 rfctl = E1000_READ_REG(hw, E1000_RFCTL); 2124 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); 2125 2126 rlpml = E1000_READ_REG(hw, E1000_RLPML); 2127 E1000_WRITE_REG(hw, E1000_RLPML, 0); 2128 2129 rctl = E1000_READ_REG(hw, E1000_RCTL); 2130 temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP); 2131 temp_rctl |= E1000_RCTL_LPE; 2132 2133 E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl); 2134 E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN); 2135 E1000_WRITE_FLUSH(hw); 2136 msec_delay(2); 2137 2138 /* Enable Rx queues that were previously enabled and restore our 2139 * previous state 2140 */ 2141 for (i = 0; i < 4; i++) 2142 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]); 2143 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2144 E1000_WRITE_FLUSH(hw); 2145 2146 E1000_WRITE_REG(hw, E1000_RLPML, rlpml); 2147 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); 2148 2149 /* Flush receive errors generated by workaround */ 2150 E1000_READ_REG(hw, E1000_ROC); 2151 E1000_READ_REG(hw, E1000_RNBC); 2152 E1000_READ_REG(hw, E1000_MPC); 2153 } 2154 2155 /** 2156 * e1000_set_pcie_completion_timeout - set pci-e completion timeout 2157 * @hw: pointer to the HW structure 2158 * 2159 * The defaults for 82575 and 82576 should be in the range of 50us to 50ms, 2160 * however the hardware default for these parts is 500us to 1ms which is less 2161 * than the 10ms recommended by the pci-e spec. To address this we need to 2162 * increase the value to either 10ms to 200ms for capability version 1 config, 2163 * or 16ms to 55ms for version 2. 
2164 **/ 2165 static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw) 2166 { 2167 u32 gcr = E1000_READ_REG(hw, E1000_GCR); 2168 s32 ret_val = E1000_SUCCESS; 2169 u16 pcie_devctl2; 2170 2171 /* only take action if timeout value is defaulted to 0 */ 2172 if (gcr & E1000_GCR_CMPL_TMOUT_MASK) 2173 goto out; 2174 2175 /* 2176 * if capababilities version is type 1 we can write the 2177 * timeout of 10ms to 200ms through the GCR register 2178 */ 2179 if (!(gcr & E1000_GCR_CAP_VER2)) { 2180 gcr |= E1000_GCR_CMPL_TMOUT_10ms; 2181 goto out; 2182 } 2183 2184 /* 2185 * for version 2 capabilities we need to write the config space 2186 * directly in order to set the completion timeout value for 2187 * 16ms to 55ms 2188 */ 2189 ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, 2190 &pcie_devctl2); 2191 if (ret_val) 2192 goto out; 2193 2194 pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; 2195 2196 ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, 2197 &pcie_devctl2); 2198 out: 2199 /* disable completion timeout resend */ 2200 gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND; 2201 2202 E1000_WRITE_REG(hw, E1000_GCR, gcr); 2203 return ret_val; 2204 } 2205 2206 /** 2207 * e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing 2208 * @hw: pointer to the hardware struct 2209 * @enable: state to enter, either enabled or disabled 2210 * @pf: Physical Function pool - do not set anti-spoofing for the PF 2211 * 2212 * enables/disables L2 switch anti-spoofing functionality. 
2213 **/ 2214 void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) 2215 { 2216 u32 dtxswc; 2217 2218 switch (hw->mac.type) { 2219 case e1000_82576: 2220 dtxswc = E1000_READ_REG(hw, E1000_DTXSWC); 2221 if (enable) { 2222 dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK | 2223 E1000_DTXSWC_VLAN_SPOOF_MASK); 2224 /* The PF can spoof - it has to in order to 2225 * support emulation mode NICs */ 2226 dtxswc ^= (1 << pf | 1 << (pf + 2227 E1000_DTXSWC_VLAN_SPOOF_SHIFT)); 2228 } else { 2229 dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | 2230 E1000_DTXSWC_VLAN_SPOOF_MASK); 2231 } 2232 E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc); 2233 break; 2234 case e1000_i350: 2235 case e1000_i354: 2236 dtxswc = E1000_READ_REG(hw, E1000_TXSWC); 2237 if (enable) { 2238 dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK | 2239 E1000_DTXSWC_VLAN_SPOOF_MASK); 2240 /* The PF can spoof - it has to in order to 2241 * support emulation mode NICs 2242 */ 2243 dtxswc ^= (1 << pf | 1 << (pf + 2244 E1000_DTXSWC_VLAN_SPOOF_SHIFT)); 2245 } else { 2246 dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | 2247 E1000_DTXSWC_VLAN_SPOOF_MASK); 2248 } 2249 E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc); 2250 default: 2251 break; 2252 } 2253 } 2254 2255 /** 2256 * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback 2257 * @hw: pointer to the hardware struct 2258 * @enable: state to enter, either enabled or disabled 2259 * 2260 * enables/disables L2 switch loopback functionality. 
2261 **/ 2262 void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) 2263 { 2264 u32 dtxswc; 2265 2266 switch (hw->mac.type) { 2267 case e1000_82576: 2268 dtxswc = E1000_READ_REG(hw, E1000_DTXSWC); 2269 if (enable) 2270 dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; 2271 else 2272 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; 2273 E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc); 2274 break; 2275 case e1000_i350: 2276 case e1000_i354: 2277 dtxswc = E1000_READ_REG(hw, E1000_TXSWC); 2278 if (enable) 2279 dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; 2280 else 2281 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; 2282 E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc); 2283 break; 2284 default: 2285 /* Currently no other hardware supports loopback */ 2286 break; 2287 } 2288 2289 2290 } 2291 2292 /** 2293 * e1000_vmdq_set_replication_pf - enable or disable vmdq replication 2294 * @hw: pointer to the hardware struct 2295 * @enable: state to enter, either enabled or disabled 2296 * 2297 * enables/disables replication of packets across multiple pools. 2298 **/ 2299 void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) 2300 { 2301 u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL); 2302 2303 if (enable) 2304 vt_ctl |= E1000_VT_CTL_VM_REPL_EN; 2305 else 2306 vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN; 2307 2308 E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl); 2309 } 2310 2311 /** 2312 * e1000_read_phy_reg_82580 - Read 82580 MDI control register 2313 * @hw: pointer to the HW structure 2314 * @offset: register offset to be read 2315 * @data: pointer to the read data 2316 * 2317 * Reads the MDI control register in the PHY at offset and stores the 2318 * information read to data. 
2319 **/ 2320 static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) 2321 { 2322 s32 ret_val; 2323 2324 DEBUGFUNC("e1000_read_phy_reg_82580"); 2325 2326 ret_val = hw->phy.ops.acquire(hw); 2327 if (ret_val) 2328 goto out; 2329 2330 ret_val = e1000_read_phy_reg_mdic(hw, offset, data); 2331 2332 hw->phy.ops.release(hw); 2333 2334 out: 2335 return ret_val; 2336 } 2337 2338 /** 2339 * e1000_write_phy_reg_82580 - Write 82580 MDI control register 2340 * @hw: pointer to the HW structure 2341 * @offset: register offset to write to 2342 * @data: data to write to register at offset 2343 * 2344 * Writes data to MDI control register in the PHY at offset. 2345 **/ 2346 static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) 2347 { 2348 s32 ret_val; 2349 2350 DEBUGFUNC("e1000_write_phy_reg_82580"); 2351 2352 ret_val = hw->phy.ops.acquire(hw); 2353 if (ret_val) 2354 goto out; 2355 2356 ret_val = e1000_write_phy_reg_mdic(hw, offset, data); 2357 2358 hw->phy.ops.release(hw); 2359 2360 out: 2361 return ret_val; 2362 } 2363 2364 /** 2365 * e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits 2366 * @hw: pointer to the HW structure 2367 * 2368 * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on 2369 * the values found in the EEPROM. This addresses an issue in which these 2370 * bits are not restored from EEPROM after reset. 
2371 **/ 2372 static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw) 2373 { 2374 s32 ret_val = E1000_SUCCESS; 2375 u32 mdicnfg; 2376 u16 nvm_data = 0; 2377 2378 DEBUGFUNC("e1000_reset_mdicnfg_82580"); 2379 2380 if (hw->mac.type != e1000_82580) 2381 goto out; 2382 if (!e1000_sgmii_active_82575(hw)) 2383 goto out; 2384 2385 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + 2386 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, 2387 &nvm_data); 2388 if (ret_val) { 2389 DEBUGOUT("NVM Read Error\n"); 2390 goto out; 2391 } 2392 2393 mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG); 2394 if (nvm_data & NVM_WORD24_EXT_MDIO) 2395 mdicnfg |= E1000_MDICNFG_EXT_MDIO; 2396 if (nvm_data & NVM_WORD24_COM_MDIO) 2397 mdicnfg |= E1000_MDICNFG_COM_MDIO; 2398 E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg); 2399 out: 2400 return ret_val; 2401 } 2402 2403 /** 2404 * e1000_reset_hw_82580 - Reset hardware 2405 * @hw: pointer to the HW structure 2406 * 2407 * This resets function or entire device (all ports, etc.) 2408 * to a known state. 2409 **/ 2410 static s32 e1000_reset_hw_82580(struct e1000_hw *hw) 2411 { 2412 s32 ret_val = E1000_SUCCESS; 2413 /* BH SW mailbox bit in SW_FW_SYNC */ 2414 u16 swmbsw_mask = E1000_SW_SYNCH_MB; 2415 u32 ctrl; 2416 bool global_device_reset = hw->dev_spec._82575.global_device_reset; 2417 2418 DEBUGFUNC("e1000_reset_hw_82580"); 2419 2420 hw->dev_spec._82575.global_device_reset = FALSE; 2421 2422 /* 82580 does not reliably do global_device_reset due to hw errata */ 2423 if (hw->mac.type == e1000_82580) 2424 global_device_reset = FALSE; 2425 2426 /* Get current control state. */ 2427 ctrl = E1000_READ_REG(hw, E1000_CTRL); 2428 2429 /* 2430 * Prevent the PCI-E bus from sticking if there is no TLP connection 2431 * on the last TLP read/write transaction when MAC is reset. 
2432 */ 2433 ret_val = e1000_disable_pcie_master_generic(hw); 2434 if (ret_val) 2435 DEBUGOUT("PCI-E Master disable polling has failed.\n"); 2436 2437 DEBUGOUT("Masking off all interrupts\n"); 2438 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); 2439 E1000_WRITE_REG(hw, E1000_RCTL, 0); 2440 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); 2441 E1000_WRITE_FLUSH(hw); 2442 2443 msec_delay(10); 2444 2445 /* Determine whether or not a global dev reset is requested */ 2446 if (global_device_reset && hw->mac.ops.acquire_swfw_sync(hw, 2447 swmbsw_mask)) 2448 global_device_reset = FALSE; 2449 2450 if (global_device_reset && !(E1000_READ_REG(hw, E1000_STATUS) & 2451 E1000_STAT_DEV_RST_SET)) 2452 ctrl |= E1000_CTRL_DEV_RST; 2453 else 2454 ctrl |= E1000_CTRL_RST; 2455 2456 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 2457 E1000_WRITE_FLUSH(hw); 2458 2459 /* Add delay to insure DEV_RST has time to complete */ 2460 if (global_device_reset) 2461 msec_delay(5); 2462 2463 ret_val = e1000_get_auto_rd_done_generic(hw); 2464 if (ret_val) { 2465 /* 2466 * When auto config read does not complete, do not 2467 * return with an error. This can happen in situations 2468 * where there is no eeprom and prevents getting link. 2469 */ 2470 DEBUGOUT("Auto Read Done did not complete\n"); 2471 } 2472 2473 /* If EEPROM is not present, run manual init scripts */ 2474 if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES)) 2475 e1000_reset_init_script_82575(hw); 2476 2477 /* clear global device reset status bit */ 2478 E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET); 2479 2480 /* Clear any pending interrupt events. 
*/ 2481 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); 2482 E1000_READ_REG(hw, E1000_ICR); 2483 2484 ret_val = e1000_reset_mdicnfg_82580(hw); 2485 if (ret_val) 2486 DEBUGOUT("Could not reset MDICNFG based on EEPROM\n"); 2487 2488 /* Install any alternate MAC address into RAR0 */ 2489 ret_val = e1000_check_alt_mac_addr_generic(hw); 2490 2491 /* Release semaphore */ 2492 if (global_device_reset) 2493 hw->mac.ops.release_swfw_sync(hw, swmbsw_mask); 2494 2495 return ret_val; 2496 } 2497 2498 /** 2499 * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual Rx PBA size 2500 * @data: data received by reading RXPBS register 2501 * 2502 * The 82580 uses a table based approach for packet buffer allocation sizes. 2503 * This function converts the retrieved value into the correct table value 2504 * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 2505 * 0x0 36 72 144 1 2 4 8 16 2506 * 0x8 35 70 140 rsv rsv rsv rsv rsv 2507 */ 2508 u16 e1000_rxpbs_adjust_82580(u32 data) 2509 { 2510 u16 ret_val = 0; 2511 2512 if (data < E1000_82580_RXPBS_TABLE_SIZE) 2513 ret_val = e1000_82580_rxpbs_table[data]; 2514 2515 return ret_val; 2516 } 2517 2518 /** 2519 * e1000_validate_nvm_checksum_with_offset - Validate EEPROM 2520 * checksum 2521 * @hw: pointer to the HW structure 2522 * @offset: offset in words of the checksum protected region 2523 * 2524 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM 2525 * and then verifies that the sum of the EEPROM is equal to 0xBABA. 
2526 **/ 2527 s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) 2528 { 2529 s32 ret_val = E1000_SUCCESS; 2530 u16 checksum = 0; 2531 u16 i, nvm_data; 2532 2533 DEBUGFUNC("e1000_validate_nvm_checksum_with_offset"); 2534 2535 for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { 2536 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); 2537 if (ret_val) { 2538 DEBUGOUT("NVM Read Error\n"); 2539 goto out; 2540 } 2541 checksum += nvm_data; 2542 } 2543 2544 if (checksum != (u16) NVM_SUM) { 2545 DEBUGOUT("NVM Checksum Invalid\n"); 2546 ret_val = -E1000_ERR_NVM; 2547 goto out; 2548 } 2549 2550 out: 2551 return ret_val; 2552 } 2553 2554 /** 2555 * e1000_update_nvm_checksum_with_offset - Update EEPROM 2556 * checksum 2557 * @hw: pointer to the HW structure 2558 * @offset: offset in words of the checksum protected region 2559 * 2560 * Updates the EEPROM checksum by reading/adding each word of the EEPROM 2561 * up to the checksum. Then calculates the EEPROM checksum and writes the 2562 * value to the EEPROM. 
2563 **/ 2564 s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) 2565 { 2566 s32 ret_val; 2567 u16 checksum = 0; 2568 u16 i, nvm_data; 2569 2570 DEBUGFUNC("e1000_update_nvm_checksum_with_offset"); 2571 2572 for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { 2573 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); 2574 if (ret_val) { 2575 DEBUGOUT("NVM Read Error while updating checksum.\n"); 2576 goto out; 2577 } 2578 checksum += nvm_data; 2579 } 2580 checksum = (u16) NVM_SUM - checksum; 2581 ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, 2582 &checksum); 2583 if (ret_val) 2584 DEBUGOUT("NVM Write Error while updating checksum.\n"); 2585 2586 out: 2587 return ret_val; 2588 } 2589 2590 /** 2591 * e1000_validate_nvm_checksum_82580 - Validate EEPROM checksum 2592 * @hw: pointer to the HW structure 2593 * 2594 * Calculates the EEPROM section checksum by reading/adding each word of 2595 * the EEPROM and then verifies that the sum of the EEPROM is 2596 * equal to 0xBABA. 2597 **/ 2598 static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw) 2599 { 2600 s32 ret_val = E1000_SUCCESS; 2601 u16 eeprom_regions_count = 1; 2602 u16 j, nvm_data; 2603 u16 nvm_offset; 2604 2605 DEBUGFUNC("e1000_validate_nvm_checksum_82580"); 2606 2607 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); 2608 if (ret_val) { 2609 DEBUGOUT("NVM Read Error\n"); 2610 goto out; 2611 } 2612 2613 if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { 2614 /* if chekcsums compatibility bit is set validate checksums 2615 * for all 4 ports. 
*/ 2616 eeprom_regions_count = 4; 2617 } 2618 2619 for (j = 0; j < eeprom_regions_count; j++) { 2620 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 2621 ret_val = e1000_validate_nvm_checksum_with_offset(hw, 2622 nvm_offset); 2623 if (ret_val != E1000_SUCCESS) 2624 goto out; 2625 } 2626 2627 out: 2628 return ret_val; 2629 } 2630 2631 /** 2632 * e1000_update_nvm_checksum_82580 - Update EEPROM checksum 2633 * @hw: pointer to the HW structure 2634 * 2635 * Updates the EEPROM section checksums for all 4 ports by reading/adding 2636 * each word of the EEPROM up to the checksum. Then calculates the EEPROM 2637 * checksum and writes the value to the EEPROM. 2638 **/ 2639 static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw) 2640 { 2641 s32 ret_val; 2642 u16 j, nvm_data; 2643 u16 nvm_offset; 2644 2645 DEBUGFUNC("e1000_update_nvm_checksum_82580"); 2646 2647 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); 2648 if (ret_val) { 2649 DEBUGOUT("NVM Read Error while updating checksum compatibility bit.\n"); 2650 goto out; 2651 } 2652 2653 if (!(nvm_data & NVM_COMPATIBILITY_BIT_MASK)) { 2654 /* set compatibility bit to validate checksums appropriately */ 2655 nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; 2656 ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, 2657 &nvm_data); 2658 if (ret_val) { 2659 DEBUGOUT("NVM Write Error while updating checksum compatibility bit.\n"); 2660 goto out; 2661 } 2662 } 2663 2664 for (j = 0; j < 4; j++) { 2665 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 2666 ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset); 2667 if (ret_val) 2668 goto out; 2669 } 2670 2671 out: 2672 return ret_val; 2673 } 2674 2675 /** 2676 * e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum 2677 * @hw: pointer to the HW structure 2678 * 2679 * Calculates the EEPROM section checksum by reading/adding each word of 2680 * the EEPROM and then verifies that the sum of the EEPROM is 2681 * equal to 0xBABA. 
2682 **/ 2683 static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw) 2684 { 2685 s32 ret_val = E1000_SUCCESS; 2686 u16 j; 2687 u16 nvm_offset; 2688 2689 DEBUGFUNC("e1000_validate_nvm_checksum_i350"); 2690 2691 for (j = 0; j < 4; j++) { 2692 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 2693 ret_val = e1000_validate_nvm_checksum_with_offset(hw, 2694 nvm_offset); 2695 if (ret_val != E1000_SUCCESS) 2696 goto out; 2697 } 2698 2699 out: 2700 return ret_val; 2701 } 2702 2703 /** 2704 * e1000_update_nvm_checksum_i350 - Update EEPROM checksum 2705 * @hw: pointer to the HW structure 2706 * 2707 * Updates the EEPROM section checksums for all 4 ports by reading/adding 2708 * each word of the EEPROM up to the checksum. Then calculates the EEPROM 2709 * checksum and writes the value to the EEPROM. 2710 **/ 2711 static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw) 2712 { 2713 s32 ret_val = E1000_SUCCESS; 2714 u16 j; 2715 u16 nvm_offset; 2716 2717 DEBUGFUNC("e1000_update_nvm_checksum_i350"); 2718 2719 for (j = 0; j < 4; j++) { 2720 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 2721 ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset); 2722 if (ret_val != E1000_SUCCESS) 2723 goto out; 2724 } 2725 2726 out: 2727 return ret_val; 2728 } 2729 2730 /** 2731 * __e1000_access_emi_reg - Read/write EMI register 2732 * @hw: pointer to the HW structure 2733 * @addr: EMI address to program 2734 * @data: pointer to value to read/write from/to the EMI address 2735 * @read: boolean flag to indicate read or write 2736 **/ 2737 static s32 __e1000_access_emi_reg(struct e1000_hw *hw, u16 address, 2738 u16 *data, bool read) 2739 { 2740 s32 ret_val = E1000_SUCCESS; 2741 2742 DEBUGFUNC("__e1000_access_emi_reg"); 2743 2744 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); 2745 if (ret_val) 2746 return ret_val; 2747 2748 if (read) 2749 ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data); 2750 else 2751 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data); 2752 
2753 return ret_val; 2754 } 2755 2756 /** 2757 * e1000_read_emi_reg - Read Extended Management Interface register 2758 * @hw: pointer to the HW structure 2759 * @addr: EMI address to program 2760 * @data: value to be read from the EMI address 2761 **/ 2762 s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data) 2763 { 2764 DEBUGFUNC("e1000_read_emi_reg"); 2765 2766 return __e1000_access_emi_reg(hw, addr, data, TRUE); 2767 } 2768 2769 /** 2770 * e1000_set_eee_i350 - Enable/disable EEE support 2771 * @hw: pointer to the HW structure 2772 * 2773 * Enable/disable EEE based on setting in dev_spec structure. 2774 * 2775 **/ 2776 s32 e1000_set_eee_i350(struct e1000_hw *hw) 2777 { 2778 s32 ret_val = E1000_SUCCESS; 2779 u32 ipcnfg, eeer; 2780 2781 DEBUGFUNC("e1000_set_eee_i350"); 2782 2783 if ((hw->mac.type < e1000_i350) || 2784 (hw->phy.media_type != e1000_media_type_copper)) 2785 goto out; 2786 ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG); 2787 eeer = E1000_READ_REG(hw, E1000_EEER); 2788 2789 /* enable or disable per user setting */ 2790 if (!(hw->dev_spec._82575.eee_disable)) { 2791 u32 eee_su = E1000_READ_REG(hw, E1000_EEE_SU); 2792 2793 ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); 2794 eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | 2795 E1000_EEER_LPI_FC); 2796 2797 /* This bit should not be set in normal operation. 
*/ 2798 if (eee_su & E1000_EEE_SU_LPI_CLK_STP) 2799 DEBUGOUT("LPI Clock Stop Bit should not be set!\n"); 2800 } else { 2801 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); 2802 eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | 2803 E1000_EEER_LPI_FC); 2804 } 2805 E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg); 2806 E1000_WRITE_REG(hw, E1000_EEER, eeer); 2807 E1000_READ_REG(hw, E1000_IPCNFG); 2808 E1000_READ_REG(hw, E1000_EEER); 2809 out: 2810 2811 return ret_val; 2812 } 2813 2814 /** 2815 * e1000_set_eee_i354 - Enable/disable EEE support 2816 * @hw: pointer to the HW structure 2817 * 2818 * Enable/disable EEE legacy mode based on setting in dev_spec structure. 2819 * 2820 **/ 2821 s32 e1000_set_eee_i354(struct e1000_hw *hw) 2822 { 2823 struct e1000_phy_info *phy = &hw->phy; 2824 s32 ret_val = E1000_SUCCESS; 2825 u16 phy_data; 2826 2827 DEBUGFUNC("e1000_set_eee_i354"); 2828 2829 if ((hw->phy.media_type != e1000_media_type_copper) || 2830 (phy->id != M88E1545_E_PHY_ID)) 2831 goto out; 2832 2833 if (!hw->dev_spec._82575.eee_disable) { 2834 /* Switch to PHY page 18. */ 2835 ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 18); 2836 if (ret_val) 2837 goto out; 2838 2839 ret_val = phy->ops.read_reg(hw, E1000_M88E1545_EEE_CTRL_1, 2840 &phy_data); 2841 if (ret_val) 2842 goto out; 2843 2844 phy_data |= E1000_M88E1545_EEE_CTRL_1_MS; 2845 ret_val = phy->ops.write_reg(hw, E1000_M88E1545_EEE_CTRL_1, 2846 phy_data); 2847 if (ret_val) 2848 goto out; 2849 2850 /* Return the PHY to page 0. */ 2851 ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 0); 2852 if (ret_val) 2853 goto out; 2854 2855 /* Turn on EEE advertisement. 
*/ 2856 ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, 2857 E1000_EEE_ADV_DEV_I354, 2858 &phy_data); 2859 if (ret_val) 2860 goto out; 2861 2862 phy_data |= E1000_EEE_ADV_100_SUPPORTED | 2863 E1000_EEE_ADV_1000_SUPPORTED; 2864 ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, 2865 E1000_EEE_ADV_DEV_I354, 2866 phy_data); 2867 } else { 2868 /* Turn off EEE advertisement. */ 2869 ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, 2870 E1000_EEE_ADV_DEV_I354, 2871 &phy_data); 2872 if (ret_val) 2873 goto out; 2874 2875 phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED | 2876 E1000_EEE_ADV_1000_SUPPORTED); 2877 ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, 2878 E1000_EEE_ADV_DEV_I354, 2879 phy_data); 2880 } 2881 2882 out: 2883 return ret_val; 2884 } 2885 2886 /** 2887 * e1000_get_eee_status_i354 - Get EEE status 2888 * @hw: pointer to the HW structure 2889 * @status: EEE status 2890 * 2891 * Get EEE status by guessing based on whether Tx or Rx LPI indications have 2892 * been received. 2893 **/ 2894 s32 e1000_get_eee_status_i354(struct e1000_hw *hw, bool *status) 2895 { 2896 struct e1000_phy_info *phy = &hw->phy; 2897 s32 ret_val = E1000_SUCCESS; 2898 u16 phy_data; 2899 2900 DEBUGFUNC("e1000_get_eee_status_i354"); 2901 2902 /* Check if EEE is supported on this device. */ 2903 if ((hw->phy.media_type != e1000_media_type_copper) || 2904 (phy->id != M88E1545_E_PHY_ID)) 2905 goto out; 2906 2907 ret_val = e1000_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, 2908 E1000_PCS_STATUS_DEV_I354, 2909 &phy_data); 2910 if (ret_val) 2911 goto out; 2912 2913 *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD | 2914 E1000_PCS_STATUS_RX_LPI_RCVD) ? TRUE : FALSE; 2915 2916 out: 2917 return ret_val; 2918 } 2919 2920 /* Due to a hw errata, if the host tries to configure the VFTA register 2921 * while performing queries from the BMC or DMA, then the VFTA in some 2922 * cases won't be written. 
2923 */ 2924 2925 /** 2926 * e1000_clear_vfta_i350 - Clear VLAN filter table 2927 * @hw: pointer to the HW structure 2928 * 2929 * Clears the register array which contains the VLAN filter table by 2930 * setting all the values to 0. 2931 **/ 2932 void e1000_clear_vfta_i350(struct e1000_hw *hw) 2933 { 2934 u32 offset; 2935 int i; 2936 2937 DEBUGFUNC("e1000_clear_vfta_350"); 2938 2939 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { 2940 for (i = 0; i < 10; i++) 2941 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); 2942 2943 E1000_WRITE_FLUSH(hw); 2944 } 2945 } 2946 2947 /** 2948 * e1000_write_vfta_i350 - Write value to VLAN filter table 2949 * @hw: pointer to the HW structure 2950 * @offset: register offset in VLAN filter table 2951 * @value: register value written to VLAN filter table 2952 * 2953 * Writes value at the given offset in the register array which stores 2954 * the VLAN filter table. 2955 **/ 2956 void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) 2957 { 2958 int i; 2959 2960 DEBUGFUNC("e1000_write_vfta_350"); 2961 2962 for (i = 0; i < 10; i++) 2963 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); 2964 2965 E1000_WRITE_FLUSH(hw); 2966 } 2967 2968 2969 /** 2970 * e1000_set_i2c_bb - Enable I2C bit-bang 2971 * @hw: pointer to the HW structure 2972 * 2973 * Enable I2C bit-bang interface 2974 * 2975 **/ 2976 s32 e1000_set_i2c_bb(struct e1000_hw *hw) 2977 { 2978 s32 ret_val = E1000_SUCCESS; 2979 u32 ctrl_ext, i2cparams; 2980 2981 DEBUGFUNC("e1000_set_i2c_bb"); 2982 2983 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 2984 ctrl_ext |= E1000_CTRL_I2C_ENA; 2985 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); 2986 E1000_WRITE_FLUSH(hw); 2987 2988 i2cparams = E1000_READ_REG(hw, E1000_I2CPARAMS); 2989 i2cparams |= E1000_I2CBB_EN; 2990 i2cparams |= E1000_I2C_DATA_OE_N; 2991 i2cparams |= E1000_I2C_CLK_OE_N; 2992 E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cparams); 2993 E1000_WRITE_FLUSH(hw); 2994 2995 return ret_val; 2996 } 2997 
/**
 *  e1000_read_i2c_byte_generic - Reads 8 bit word over I2C
 *  @hw: pointer to hardware structure
 *  @byte_offset: byte offset to read
 *  @dev_addr: device address (read indication bit is OR'ed in below)
 *  @data: value read
 *
 *  Performs byte read operation over I2C interface at
 *  a specified device address.
 *
 *  Returns E1000_SUCCESS, E1000_ERR_SWFW_SYNC if the PHY semaphore cannot
 *  be taken, or the last E1000_ERR_I2C from a clock/ack helper after the
 *  retries are exhausted.  Because retry starts at 1 and the loop runs
 *  while retry < max_retry (10), at most 9 attempts are made.
 **/
s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
				u8 dev_addr, u8 *data)
{
	s32 status = E1000_SUCCESS;
	u32 max_retry = 10;
	u32 retry = 1;
	u16 swfw_mask = 0;

	/* Master NACKs the (single) data byte to terminate the read */
	bool nack = TRUE;

	DEBUGFUNC("e1000_read_i2c_byte_generic");

	swfw_mask = E1000_SWFW_PHY0_SM;

	do {
		/* Semaphore is re-taken on every retry; the fail path below
		 * releases it before looping back here.
		 */
		if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
		    != E1000_SUCCESS) {
			status = E1000_ERR_SWFW_SYNC;
			goto read_byte_out;
		}

		e1000_i2c_start(hw);

		/* Device Address and write indication */
		status = e1000_clock_out_i2c_byte(hw, dev_addr);
		if (status != E1000_SUCCESS)
			goto fail;

		status = e1000_get_i2c_ack(hw);
		if (status != E1000_SUCCESS)
			goto fail;

		status = e1000_clock_out_i2c_byte(hw, byte_offset);
		if (status != E1000_SUCCESS)
			goto fail;

		status = e1000_get_i2c_ack(hw);
		if (status != E1000_SUCCESS)
			goto fail;

		/* Repeated start to switch the transfer direction */
		e1000_i2c_start(hw);

		/* Device Address and read indication */
		status = e1000_clock_out_i2c_byte(hw, (dev_addr | 0x1));
		if (status != E1000_SUCCESS)
			goto fail;

		status = e1000_get_i2c_ack(hw);
		if (status != E1000_SUCCESS)
			goto fail;

		status = e1000_clock_in_i2c_byte(hw, data);
		if (status != E1000_SUCCESS)
			goto fail;

		status = e1000_clock_out_i2c_bit(hw, nack);
		if (status != E1000_SUCCESS)
			goto fail;

		e1000_i2c_stop(hw);
		break;

fail:
		hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		msec_delay(100);
		/* Try to free a possibly stuck SDA line before retrying */
		e1000_i2c_bus_clear(hw);
		retry++;
		if (retry < max_retry)
			DEBUGOUT("I2C byte read error - Retrying.\n");
		else
			DEBUGOUT("I2C byte read error.\n");

	} while (retry < max_retry);

	/* NOTE(review): when the loop exits via the fail path (retries
	 * exhausted) the semaphore was already released above, so this is a
	 * second release.  This matches the vendor code; release appears to
	 * be an idempotent bit-clear, but confirm against
	 * e1000_release_swfw_sync_82575().
	 */
	hw->mac.ops.release_swfw_sync(hw, swfw_mask);

read_byte_out:

	return status;
}

/**
 *  e1000_write_i2c_byte_generic - Writes 8 bit word over I2C
 *  @hw: pointer to hardware structure
 *  @byte_offset: byte offset to write
 *  @dev_addr: device address
 *  @data: value to write
 *
 *  Performs byte write operation over I2C interface at
 *  a specified device address.
 *
 *  Returns E1000_SUCCESS, E1000_ERR_SWFW_SYNC if the PHY semaphore cannot
 *  be taken, or the last E1000_ERR_I2C from a clock/ack helper.  With
 *  max_retry == 1 and retry starting at 0, exactly one attempt is made and
 *  the "Retrying" branch below is never reached.  Unlike the read path,
 *  the semaphore is taken once outside the loop and held across attempts.
 **/
s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
				 u8 dev_addr, u8 data)
{
	s32 status = E1000_SUCCESS;
	u32 max_retry = 1;
	u32 retry = 0;
	u16 swfw_mask = 0;

	DEBUGFUNC("e1000_write_i2c_byte_generic");

	swfw_mask = E1000_SWFW_PHY0_SM;

	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS) {
		status = E1000_ERR_SWFW_SYNC;
		goto write_byte_out;
	}

	do {
		e1000_i2c_start(hw);

		/* Device address with write indication (bit 0 clear) */
		status = e1000_clock_out_i2c_byte(hw, dev_addr);
		if (status != E1000_SUCCESS)
			goto fail;

		status = e1000_get_i2c_ack(hw);
		if (status != E1000_SUCCESS)
			goto fail;

		status = e1000_clock_out_i2c_byte(hw, byte_offset);
		if (status != E1000_SUCCESS)
			goto fail;

		status = e1000_get_i2c_ack(hw);
		if (status != E1000_SUCCESS)
			goto fail;

		status = e1000_clock_out_i2c_byte(hw, data);
		if (status != E1000_SUCCESS)
			goto fail;

		status = e1000_get_i2c_ack(hw);
		if (status != E1000_SUCCESS)
			goto fail;

		e1000_i2c_stop(hw);
		break;

fail:
		/* Semaphore stays held here; only the bus is cleared */
		e1000_i2c_bus_clear(hw);
		retry++;
		if (retry < max_retry)
			DEBUGOUT("I2C byte write error - Retrying.\n");
		else
			DEBUGOUT("I2C byte write error.\n");
	} while (retry < max_retry);

	hw->mac.ops.release_swfw_sync(hw, swfw_mask);

write_byte_out:

	return status;
}

/**
 *  e1000_i2c_start - Sets I2C start condition
 *  @hw: pointer to hardware structure
 *
 *  Sets I2C start condition (High -> Low on SDA while SCL is High)
 **/
static void e1000_i2c_start(struct e1000_hw *hw)
{
	/* Snapshot of I2CPARAMS; helpers below update this cached copy and
	 * write it back to the register.
	 */
	u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);

	DEBUGFUNC("e1000_i2c_start");

	/* Start condition must begin with data and clock high */
	e1000_set_i2c_data(hw, &i2cctl, 1);
	e1000_raise_i2c_clk(hw, &i2cctl);

	/* Setup time for start condition (4.7us) */
	usec_delay(E1000_I2C_T_SU_STA);

	/* SDA falls while SCL is high: the start condition itself */
	e1000_set_i2c_data(hw, &i2cctl, 0);

	/* Hold time for start condition (4us) */
	usec_delay(E1000_I2C_T_HD_STA);

	e1000_lower_i2c_clk(hw, &i2cctl);

	/* Minimum low period of clock is 4.7 us */
	usec_delay(E1000_I2C_T_LOW);

}

/**
 *  e1000_i2c_stop - Sets I2C stop condition
 *  @hw: pointer to hardware structure
 *
 *  Sets I2C stop condition (Low -> High on SDA while SCL is High)
 **/
static void e1000_i2c_stop(struct e1000_hw *hw)
{
	u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);

	DEBUGFUNC("e1000_i2c_stop");

	/* Stop condition must begin with data low and clock high */
	e1000_set_i2c_data(hw, &i2cctl, 0);
	e1000_raise_i2c_clk(hw, &i2cctl);

	/* Setup time for stop condition (4us) */
	usec_delay(E1000_I2C_T_SU_STO);

	/* SDA rises while SCL is high: the stop condition itself */
	e1000_set_i2c_data(hw, &i2cctl, 1);

	/* bus free time between stop and start (4.7us)*/
	usec_delay(E1000_I2C_T_BUF);
}

/**
 *  e1000_clock_in_i2c_byte - Clocks in one byte via I2C
 *  @hw: pointer to hardware structure
 *  @data: receives the byte read from the bus
 *
 *  Clocks in one byte data via I2C data/clock, MSB first.
 *  Always returns E1000_SUCCESS (per-bit reads cannot fail here).
 **/
static s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data)
{
	s32 i;
	bool bit = 0;

	DEBUGFUNC("e1000_clock_in_i2c_byte");

	*data = 0;
	/* Bit 7 first, per I2C byte ordering */
	for (i = 7; i >= 0; i--) {
		e1000_clock_in_i2c_bit(hw, &bit);
		*data |= bit << i;
	}

	return E1000_SUCCESS;
}

/**
 *  e1000_clock_out_i2c_byte - Clocks out one byte via I2C
 *  @hw: pointer to hardware structure
 *  @data: data byte clocked out
 *
 *  Clocks out one byte data via I2C data/clock, MSB first, then releases
 *  the SDA line so the slave can drive the ACK bit.
 *  Returns the first per-bit error, or E1000_SUCCESS.
 **/
static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data)
{
	s32 status = E1000_SUCCESS;
	s32 i;
	u32 i2cctl;
	bool bit = 0;

	DEBUGFUNC("e1000_clock_out_i2c_byte");

	for (i = 7; i >= 0; i--) {
		bit = (data >> i) & 0x1;
		status = e1000_clock_out_i2c_bit(hw, bit);

		if (status != E1000_SUCCESS)
			break;
	}

	/* Release SDA line (set high) */
	i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);

	i2cctl |= E1000_I2C_DATA_OE_N;
	E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl);
	E1000_WRITE_FLUSH(hw);

	return status;
}

/**
 *  e1000_get_i2c_ack - Polls for I2C ACK
 *  @hw: pointer to hardware structure
 *
 *  Clocks in/out one bit via I2C data/clock and checks that the slave
 *  pulled SDA low (ACK).  Returns E1000_ERR_I2C if SCL never returns high
 *  (clock stretching timeout) or if SDA reads high (NACK).
 **/
static s32 e1000_get_i2c_ack(struct e1000_hw *hw)
{
	s32 status = E1000_SUCCESS;
	u32 i = 0;
	u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
	u32 timeout = 10;
	bool ack = TRUE;

	DEBUGFUNC("e1000_get_i2c_ack");

	e1000_raise_i2c_clk(hw, &i2cctl);

	/* Minimum high period of clock is 4us */
	usec_delay(E1000_I2C_T_HIGH);

	/* Wait until SCL returns high */
	for (i = 0; i < timeout; i++) {
		usec_delay(1);
		i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
		if (i2cctl & E1000_I2C_CLK_IN)
			break;
	}
	if (!(i2cctl & E1000_I2C_CLK_IN))
		return E1000_ERR_I2C;

	/* SDA high during the ack slot means the slave did not ACK */
	ack = e1000_get_i2c_data(&i2cctl);
	if (ack) {
		DEBUGOUT("I2C ack was not received.\n");
		status = E1000_ERR_I2C;
	}

	e1000_lower_i2c_clk(hw, &i2cctl);

	/* Minimum low period of clock is 4.7 us */
	usec_delay(E1000_I2C_T_LOW);

	return status;
}

/**
 *  e1000_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
 *  @hw: pointer to hardware structure
 *  @data: read data value
 *
 *  Clocks in one bit via I2C data/clock: raises SCL, samples SDA while
 *  SCL is high, then lowers SCL.  Always returns E1000_SUCCESS.
 **/
static s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data)
{
	u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);

	DEBUGFUNC("e1000_clock_in_i2c_bit");

	e1000_raise_i2c_clk(hw, &i2cctl);

	/* Minimum high period of clock is 4us */
	usec_delay(E1000_I2C_T_HIGH);

	/* Re-read the register so SDA is sampled while SCL is high */
	i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
	*data = e1000_get_i2c_data(&i2cctl);

	e1000_lower_i2c_clk(hw, &i2cctl);

	/* Minimum low period of clock is 4.7 us */
	usec_delay(E1000_I2C_T_LOW);

	return E1000_SUCCESS;
}

/**
 *  e1000_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock
 *  @hw: pointer to hardware structure
 *  @data: data value to write
 *
 *  Clocks out one bit via I2C data/clock: drives SDA to @data, then pulses
 *  SCL high for one clock period.  Returns E1000_ERR_I2C if SDA could not
 *  be set to the requested value (e.g. line held low externally).
 **/
static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data)
{
	s32 status;
	u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);

	DEBUGFUNC("e1000_clock_out_i2c_bit");

	status = e1000_set_i2c_data(hw, &i2cctl, data);
	if (status == E1000_SUCCESS) {
		e1000_raise_i2c_clk(hw, &i2cctl);

		/* Minimum high period of clock is 4us */
		usec_delay(E1000_I2C_T_HIGH);

		e1000_lower_i2c_clk(hw, &i2cctl);

		/* Minimum low period of clock is 4.7 us.
		 * This also takes care of the data hold time.
		 */
		usec_delay(E1000_I2C_T_LOW);
	} else {
		status = E1000_ERR_I2C;
		DEBUGOUT1("I2C data was not set to %X\n", data);
	}

	return status;
}
/**
 *  e1000_raise_i2c_clk - Raises the I2C SCL clock
 *  @hw: pointer to hardware structure
 *  @i2cctl: Current value of I2CCTL register (updated in place)
 *
 *  Raises the I2C clock line '0'->'1': sets CLK_OUT and enables the clock
 *  output driver (clears active-low CLK_OE_N), then waits out SCL rise.
 **/
static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl)
{
	DEBUGFUNC("e1000_raise_i2c_clk");

	*i2cctl |= E1000_I2C_CLK_OUT;
	*i2cctl &= ~E1000_I2C_CLK_OE_N;
	E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
	E1000_WRITE_FLUSH(hw);

	/* SCL rise time (1000ns) */
	usec_delay(E1000_I2C_T_RISE);
}

/**
 *  e1000_lower_i2c_clk - Lowers the I2C SCL clock
 *  @hw: pointer to hardware structure
 *  @i2cctl: Current value of I2CCTL register (updated in place)
 *
 *  Lowers the I2C clock line '1'->'0': clears CLK_OUT with the clock
 *  output driver enabled (CLK_OE_N low), then waits out SCL fall.
 **/
static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl)
{

	DEBUGFUNC("e1000_lower_i2c_clk");

	*i2cctl &= ~E1000_I2C_CLK_OUT;
	*i2cctl &= ~E1000_I2C_CLK_OE_N;
	E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
	E1000_WRITE_FLUSH(hw);

	/* SCL fall time (300ns) */
	usec_delay(E1000_I2C_T_FALL);
}

/**
 *  e1000_set_i2c_data - Sets the I2C data bit
 *  @hw: pointer to hardware structure
 *  @i2cctl: Current value of I2CCTL register (updated in place)
 *  @data: I2C data value (0 or 1) to set
 *
 *  Sets the I2C data bit: drives SDA to @data (DATA_OE_N cleared to enable
 *  the data driver) and reads it back to verify the line followed.
 *  NOTE(review): CLK_OE_N is also set here (clock driver disabled) —
 *  matches vendor code; confirm intent against the I2CPARAMS register
 *  definition in the controller datasheet.
 *  Returns E1000_ERR_I2C if SDA does not read back as @data.
 **/
static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data)
{
	s32 status = E1000_SUCCESS;

	DEBUGFUNC("e1000_set_i2c_data");

	if (data)
		*i2cctl |= E1000_I2C_DATA_OUT;
	else
		*i2cctl &= ~E1000_I2C_DATA_OUT;

	*i2cctl &= ~E1000_I2C_DATA_OE_N;
	*i2cctl |= E1000_I2C_CLK_OE_N;
	E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
	E1000_WRITE_FLUSH(hw);

	/* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
	usec_delay(E1000_I2C_T_RISE + E1000_I2C_T_FALL + E1000_I2C_T_SU_DATA);

	/* Verify the line actually reached the requested level */
	*i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
	if (data != e1000_get_i2c_data(i2cctl)) {
		status = E1000_ERR_I2C;
		DEBUGOUT1("Error - I2C data was not set to %X.\n", data);
	}

	return status;
}

/**
 *  e1000_get_i2c_data - Reads the I2C SDA data bit
 *  @i2cctl: Current value of I2CCTL register
 *
 *  Returns the I2C data bit value (1 if DATA_IN is set in the cached
 *  register value, else 0).  Pure function of @i2cctl; does not touch hw.
 **/
static bool e1000_get_i2c_data(u32 *i2cctl)
{
	bool data;

	DEBUGFUNC("e1000_get_i2c_data");

	if (*i2cctl & E1000_I2C_DATA_IN)
		data = 1;
	else
		data = 0;

	return data;
}

/**
 *  e1000_i2c_bus_clear - Clears the I2C bus
 *  @hw: pointer to hardware structure
 *
 *  Clears the I2C bus by sending nine clock pulses.
 *  Used when data line is stuck low: with SDA released high, nine SCL
 *  pulses let a slave finish shifting out whatever byte it was stuck on,
 *  after which a start + stop returns the bus to idle.
 **/
void e1000_i2c_bus_clear(struct e1000_hw *hw)
{
	u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
	u32 i;

	DEBUGFUNC("e1000_i2c_bus_clear");

	e1000_i2c_start(hw);

	e1000_set_i2c_data(hw, &i2cctl, 1);

	for (i = 0; i < 9; i++) {
		e1000_raise_i2c_clk(hw, &i2cctl);

		/* Min high period of clock is 4us */
		usec_delay(E1000_I2C_T_HIGH);

		e1000_lower_i2c_clk(hw, &i2cctl);

		/* Min low period of clock is 4.7us*/
		usec_delay(E1000_I2C_T_LOW);
	}

	e1000_i2c_start(hw);

	/* Put the i2c bus back to default state */
	e1000_i2c_stop(hw);
}