1 /****************************************************************************** 2 3 Copyright (c) 2001-2015, Intel Corporation 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 POSSIBILITY OF SUCH DAMAGE. 
31 32 ******************************************************************************/ 33 /*$FreeBSD$*/ 34 35 /* 82562G 10/100 Network Connection 36 * 82562G-2 10/100 Network Connection 37 * 82562GT 10/100 Network Connection 38 * 82562GT-2 10/100 Network Connection 39 * 82562V 10/100 Network Connection 40 * 82562V-2 10/100 Network Connection 41 * 82566DC-2 Gigabit Network Connection 42 * 82566DC Gigabit Network Connection 43 * 82566DM-2 Gigabit Network Connection 44 * 82566DM Gigabit Network Connection 45 * 82566MC Gigabit Network Connection 46 * 82566MM Gigabit Network Connection 47 * 82567LM Gigabit Network Connection 48 * 82567LF Gigabit Network Connection 49 * 82567V Gigabit Network Connection 50 * 82567LM-2 Gigabit Network Connection 51 * 82567LF-2 Gigabit Network Connection 52 * 82567V-2 Gigabit Network Connection 53 * 82567LF-3 Gigabit Network Connection 54 * 82567LM-3 Gigabit Network Connection 55 * 82567LM-4 Gigabit Network Connection 56 * 82577LM Gigabit Network Connection 57 * 82577LC Gigabit Network Connection 58 * 82578DM Gigabit Network Connection 59 * 82578DC Gigabit Network Connection 60 * 82579LM Gigabit Network Connection 61 * 82579V Gigabit Network Connection 62 * Ethernet Connection I217-LM 63 * Ethernet Connection I217-V 64 * Ethernet Connection I218-V 65 * Ethernet Connection I218-LM 66 * Ethernet Connection (2) I218-LM 67 * Ethernet Connection (2) I218-V 68 * Ethernet Connection (3) I218-LM 69 * Ethernet Connection (3) I218-V 70 */ 71 72 #include "e1000_api.h" 73 74 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw); 75 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw); 76 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw); 77 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw); 78 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); 79 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); 80 static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index); 81 static int 
e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index); 82 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw); 83 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw, 84 u8 *mc_addr_list, 85 u32 mc_addr_count); 86 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw); 87 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw); 88 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active); 89 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, 90 bool active); 91 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, 92 bool active); 93 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, 94 u16 words, u16 *data); 95 static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words, 96 u16 *data); 97 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, 98 u16 words, u16 *data); 99 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw); 100 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw); 101 static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw); 102 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, 103 u16 *data); 104 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw); 105 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw); 106 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw); 107 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw); 108 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw); 109 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw); 110 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw); 111 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, 112 u16 *speed, u16 *duplex); 113 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw); 114 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw); 115 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw); 116 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); 117 
static s32 e1000_setup_led_pchlan(struct e1000_hw *hw); 118 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw); 119 static s32 e1000_led_on_pchlan(struct e1000_hw *hw); 120 static s32 e1000_led_off_pchlan(struct e1000_hw *hw); 121 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); 122 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank); 123 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); 124 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); 125 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, 126 u32 offset, u8 *data); 127 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, 128 u8 size, u16 *data); 129 static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, 130 u32 *data); 131 static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, 132 u32 offset, u32 *data); 133 static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, 134 u32 offset, u32 data); 135 static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw, 136 u32 offset, u32 dword); 137 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, 138 u32 offset, u16 *data); 139 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, 140 u32 offset, u8 byte); 141 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw); 142 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw); 143 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw); 144 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); 145 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); 146 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); 147 static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr); 148 149 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ 150 /* Offset 04h HSFSTS */ 151 union ich8_hws_flash_status { 152 struct ich8_hsfsts { 153 u16 flcdone:1; /* bit 0 
Flash Cycle Done */
                u16 flcerr:1; /* bit 1 Flash Cycle Error */
                u16 dael:1; /* bit 2 Direct Access error Log */
                u16 berasesz:2; /* bit 4:3 Sector Erase Size */
                u16 flcinprog:1; /* bit 5 flash cycle in Progress */
                u16 reserved1:2; /* bit 7:6 Reserved */
                u16 reserved2:6; /* bit 13:8 Reserved */
                u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
                u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
        } hsf_status;
        u16 regval;
};

/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
        struct ich8_hsflctl {
                u16 flcgo:1; /* 0 Flash Cycle Go */
                u16 flcycle:2; /* 2:1 Flash Cycle */
                u16 reserved:5; /* 7:3 Reserved */
                u16 fldbcount:2; /* 9:8 Flash Data Byte Count */
                u16 flockdn:6; /* 15:10 Reserved */
        } hsf_ctrl;
        u16 regval;
};

/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
        struct ich8_flracc {
                u32 grra:8; /* 0:7 GbE region Read Access */
                u32 grwa:8; /* 8:15 GbE region Write Access */
                u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
                u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
        } hsf_flregacc;
        /* NOTE(review): the bitfields above describe a 32-bit register but
         * regval is u16, so only the low word (grra/grwa) is reachable
         * through regval -- confirm against the flash register definition.
         */
        u16 regval;
};

/**
 *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 *  @hw: pointer to the HW structure
 *
 *  Test access to the PHY registers by reading the PHY ID registers.  If
 *  the PHY ID is already known (e.g. resume path) compare it with known ID,
 *  otherwise assume the read PHY ID is correct if it is valid.
 *
 *  Assumes the sw/fw/hw semaphore is already acquired.
 **/
static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
        u16 phy_reg = 0;
        u32 phy_id = 0;
        s32 ret_val = 0;
        u16 retry_count;
        u32 mac_reg = 0;

        /* Try reading PHY_ID1/PHY_ID2 up to twice; 0xFFFF means the
         * register read floated high (PHY not responding on the bus).
         */
        for (retry_count = 0; retry_count < 2; retry_count++) {
                ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
                if (ret_val || (phy_reg == 0xFFFF))
                        continue;
                phy_id = (u32)(phy_reg << 16);

                ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
                if (ret_val || (phy_reg == 0xFFFF)) {
                        phy_id = 0;
                        continue;
                }
                phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
                break;
        }

        /* If an ID is already cached (e.g. resume path), require a match;
         * otherwise accept and cache the freshly read ID if it is non-zero.
         */
        if (hw->phy.id) {
                if (hw->phy.id == phy_id)
                        goto out;
        } else if (phy_id) {
                hw->phy.id = phy_id;
                hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
                goto out;
        }

        /* In case the PHY needs to be in mdio slow mode,
         * set slow mode and try to get the PHY id again.
         */
        if (hw->mac.type < e1000_pch_lpt) {
                /* Semaphore must be dropped around the slow-mode retry; it is
                 * re-acquired before returning (caller still holds the lock).
                 */
                hw->phy.ops.release(hw);
                ret_val = e1000_set_mdio_slow_mode_hv(hw);
                if (!ret_val)
                        ret_val = e1000_get_phy_id(hw);
                hw->phy.ops.acquire(hw);
        }

        if (ret_val)
                return FALSE;
out:
        if (hw->mac.type >= e1000_pch_lpt) {
                /* Only unforce SMBus if ME is not active */
                if (!(E1000_READ_REG(hw, E1000_FWSM) &
                      E1000_ICH_FWSM_FW_VALID)) {
                        /* Unforce SMBus mode in PHY */
                        hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
                        phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
                        hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);

                        /* Unforce SMBus mode in MAC */
                        mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
                        mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
                        E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
                }
        }

        return TRUE;
}

/**
 *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 *  @hw: pointer to the HW structure
 *
 *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 *  used to reset the
PHY to a quiescent state when necessary.
 **/
static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
{
        u32 mac_reg;

        DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");

        /* Set Phy Config Counter to 50msec */
        mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
        mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
        mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
        E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);

        /* Toggle LANPHYPC Value bit: drive the pin low under software
         * override, hold it for 1ms, then release the override.
         */
        mac_reg = E1000_READ_REG(hw, E1000_CTRL);
        mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
        mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
        E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
        E1000_WRITE_FLUSH(hw);
        msec_delay(1);
        mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
        E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
        E1000_WRITE_FLUSH(hw);

        if (hw->mac.type < e1000_pch_lpt) {
                msec_delay(50);
        } else {
                /* Poll CTRL_EXT.LPCD for up to 20 x 5ms, then settle 30ms */
                u16 count = 20;

                do {
                        msec_delay(5);
                } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
                           E1000_CTRL_EXT_LPCD) && count--);

                msec_delay(30);
        }
}

/**
 *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 *  @hw: pointer to the HW structure
 *
 *  Workarounds/flow necessary for PHY initialization during driver load
 *  and resume paths.
 **/
static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
        u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
        s32 ret_val;

        DEBUGFUNC("e1000_init_phy_workarounds_pchlan");

        /* Gate automatic PHY configuration by hardware on managed and
         * non-managed 82579 and newer adapters.
         */
        e1000_gate_hw_phy_config_ich8lan(hw, TRUE);

        /* It is not possible to be certain of the current state of ULP
         * so forcibly disable it.
         */
        hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
        e1000_disable_ulp_lpt_lp(hw, TRUE);

        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val) {
                DEBUGOUT("Failed to initialize PHY flow\n");
                goto out;
        }

        /* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
         * inaccessible and resetting the PHY is not blocked, toggle the
         * LANPHYPC Value bit to force the interconnect to PCIe mode.
         *
         * NOTE: the case labels below deliberately fall through from the
         * newest MAC types to the oldest; each stage adds one more recovery
         * step, so the order must not be changed.
         */
        switch (hw->mac.type) {
        case e1000_pch_lpt:
        case e1000_pch_spt:
        case e1000_pch_cnp:
                if (e1000_phy_is_accessible_pchlan(hw))
                        break;

                /* Before toggling LANPHYPC, see if PHY is accessible by
                 * forcing MAC to SMBus mode first.
                 */
                mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
                mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
                E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

                /* Wait 50 milliseconds for MAC to finish any retries
                 * that it might be trying to perform from previous
                 * attempts to acknowledge any phy read requests.
                 */
                msec_delay(50);

                /* fall-through */
        case e1000_pch2lan:
                if (e1000_phy_is_accessible_pchlan(hw))
                        break;

                /* fall-through */
        case e1000_pchlan:
                if ((hw->mac.type == e1000_pchlan) &&
                    (fwsm & E1000_ICH_FWSM_FW_VALID))
                        break;

                if (hw->phy.ops.check_reset_block(hw)) {
                        DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
                        ret_val = -E1000_ERR_PHY;
                        break;
                }

                /* Toggle LANPHYPC Value bit */
                e1000_toggle_lanphypc_pch_lpt(hw);
                if (hw->mac.type >= e1000_pch_lpt) {
                        if (e1000_phy_is_accessible_pchlan(hw))
                                break;

                        /* Toggling LANPHYPC brings the PHY out of SMBus mode
                         * so ensure that the MAC is also out of SMBus mode
                         */
                        mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
                        mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
                        E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

                        if (e1000_phy_is_accessible_pchlan(hw))
                                break;

                        ret_val = -E1000_ERR_PHY;
                }
                break;
        default:
                break;
        }

        hw->phy.ops.release(hw);
        if (!ret_val) {

                /* Check to see if able to reset PHY.  Print error if not */
                if (hw->phy.ops.check_reset_block(hw)) {
                        ERROR_REPORT("Reset blocked by ME\n");
                        goto out;
                }

                /* Reset the PHY before any access to it.  Doing so, ensures
                 * that the PHY is in a known good state before we read/write
                 * PHY registers.  The generic reset is sufficient here,
                 * because we haven't determined the PHY type yet.
                 */
                ret_val = e1000_phy_hw_reset_generic(hw);
                if (ret_val)
                        goto out;

                /* On a successful reset, possibly need to wait for the PHY
                 * to quiesce to an accessible state before returning control
                 * to the calling function.  If the PHY does not quiesce, then
                 * return E1000E_BLK_PHY_RESET, as this is the condition that
                 * the PHY is in.
                 */
                ret_val = hw->phy.ops.check_reset_block(hw);
                if (ret_val)
                        ERROR_REPORT("ME blocked access to PHY after reset\n");
        }

out:
        /* Ungate automatic PHY configuration on non-managed 82579 */
        if ((hw->mac.type == e1000_pch2lan) &&
            !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
                msec_delay(10);
                e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
        }

        return ret_val;
}

/**
 *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers.
 **/
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
        struct e1000_phy_info *phy = &hw->phy;
        s32 ret_val;

        DEBUGFUNC("e1000_init_phy_params_pchlan");

        phy->addr = 1;
        phy->reset_delay_us = 100;

        phy->ops.acquire = e1000_acquire_swflag_ich8lan;
        phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
        phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
        phy->ops.set_page = e1000_set_page_igp;
        phy->ops.read_reg = e1000_read_phy_reg_hv;
        phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
        phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
        phy->ops.release = e1000_release_swflag_ich8lan;
        phy->ops.reset = e1000_phy_hw_reset_ich8lan;
        phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
        phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
        phy->ops.write_reg = e1000_write_phy_reg_hv;
        phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
        phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
        phy->ops.power_up = e1000_power_up_phy_copper;
        phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
        phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;

        phy->id = e1000_phy_unknown;

        /* The workaround flow may itself discover and cache the PHY id */
        ret_val = e1000_init_phy_workarounds_pchlan(hw);
        if (ret_val)
                return ret_val;

        if (phy->id == e1000_phy_unknown)
                switch
(hw->mac.type) {
                default:
                        ret_val = e1000_get_phy_id(hw);
                        if (ret_val)
                                return ret_val;
                        /* 0 and PHY_REVISION_MASK are both invalid ids;
                         * anything else means the read succeeded.
                         */
                        if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
                                break;
                        /* fall-through */
                case e1000_pch2lan:
                case e1000_pch_lpt:
                case e1000_pch_spt:
                case e1000_pch_cnp:
                        /* In case the PHY needs to be in mdio slow mode,
                         * set slow mode and try to get the PHY id again.
                         */
                        ret_val = e1000_set_mdio_slow_mode_hv(hw);
                        if (ret_val)
                                return ret_val;
                        ret_val = e1000_get_phy_id(hw);
                        if (ret_val)
                                return ret_val;
                        break;
                }
        phy->type = e1000_get_phy_type_from_id(phy->id);

        switch (phy->type) {
        case e1000_phy_82577:
        case e1000_phy_82579:
        case e1000_phy_i217:
                phy->ops.check_polarity = e1000_check_polarity_82577;
                phy->ops.force_speed_duplex =
                    e1000_phy_force_speed_duplex_82577;
                phy->ops.get_cable_length = e1000_get_cable_length_82577;
                phy->ops.get_info = e1000_get_phy_info_82577;
                phy->ops.commit = e1000_phy_sw_reset_generic;
                break;
        case e1000_phy_82578:
                phy->ops.check_polarity = e1000_check_polarity_m88;
                phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
                phy->ops.get_cable_length = e1000_get_cable_length_m88;
                phy->ops.get_info = e1000_get_phy_info_m88;
                break;
        default:
                ret_val = -E1000_ERR_PHY;
                break;
        }

        return ret_val;
}

/**
 *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers.
 **/
static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
{
        struct e1000_phy_info *phy = &hw->phy;
        s32 ret_val;
        u16 i = 0;

        DEBUGFUNC("e1000_init_phy_params_ich8lan");

        phy->addr = 1;
        phy->reset_delay_us = 100;

        phy->ops.acquire = e1000_acquire_swflag_ich8lan;
        phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
        phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
        phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
        phy->ops.read_reg = e1000_read_phy_reg_igp;
        phy->ops.release = e1000_release_swflag_ich8lan;
        phy->ops.reset = e1000_phy_hw_reset_ich8lan;
        phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
        phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
        phy->ops.write_reg = e1000_write_phy_reg_igp;
        phy->ops.power_up = e1000_power_up_phy_copper;
        phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;

        /* We may need to do this twice - once for IGP and if that fails,
         * we'll set BM func pointers and try again
         */
        ret_val = e1000_determine_phy_address(hw);
        if (ret_val) {
                phy->ops.write_reg = e1000_write_phy_reg_bm;
                phy->ops.read_reg = e1000_read_phy_reg_bm;
                ret_val = e1000_determine_phy_address(hw);
                if (ret_val) {
                        DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
                        return ret_val;
                }
        }

        /* Poll for a recognizable PHY id for up to ~100ms */
        phy->id = 0;
        while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
               (i++ < 100)) {
                msec_delay(1);
                ret_val = e1000_get_phy_id(hw);
                if (ret_val)
                        return ret_val;
        }

        /* Verify phy id */
        switch (phy->id) {
        case IGP03E1000_E_PHY_ID:
                phy->type = e1000_phy_igp_3;
                phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
                phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
                phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
                phy->ops.get_info = e1000_get_phy_info_igp;
                phy->ops.check_polarity = e1000_check_polarity_igp;
                phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
                break;
        case IFE_E_PHY_ID:
        case IFE_PLUS_E_PHY_ID:
        case IFE_C_E_PHY_ID:
                /* IFE PHYs are 10/100 only */
                phy->type = e1000_phy_ife;
                phy->autoneg_mask = E1000_ALL_NOT_GIG;
                phy->ops.get_info = e1000_get_phy_info_ife;
                phy->ops.check_polarity = e1000_check_polarity_ife;
                phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
                break;
        case BME1000_E_PHY_ID:
                phy->type = e1000_phy_bm;
                phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
                phy->ops.read_reg = e1000_read_phy_reg_bm;
                phy->ops.write_reg = e1000_write_phy_reg_bm;
                phy->ops.commit = e1000_phy_sw_reset_generic;
                phy->ops.get_info = e1000_get_phy_info_m88;
                phy->ops.check_polarity = e1000_check_polarity_m88;
                phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
                break;
        default:
                return -E1000_ERR_PHY;
                break;
        }

        return E1000_SUCCESS;
}

/**
 *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific NVM parameters and function
 *  pointers.
 **/
static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
        struct e1000_nvm_info *nvm = &hw->nvm;
        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
        u32 gfpreg, sector_base_addr, sector_end_addr;
        u16 i;
        u32 nvm_size;

        DEBUGFUNC("e1000_init_nvm_params_ich8lan");

        nvm->type = e1000_nvm_flash_sw;

        if (hw->mac.type >= e1000_pch_spt) {
                /* in SPT, gfpreg doesn't exist. NVM size is taken from the
                 * STRAP register. This is because in SPT the GbE Flash region
                 * is no longer accessed through the flash registers. Instead,
                 * the mechanism has changed, and the Flash region access
                 * registers are now implemented in GbE memory space.
                 */
                nvm->flash_base_addr = 0;
                nvm_size =
                    (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
                    * NVM_SIZE_MULTIPLIER;
                /* Total size represents two banks; halve for one bank */
                nvm->flash_bank_size = nvm_size / 2;
                /* Adjust to word count */
                nvm->flash_bank_size /= sizeof(u16);
                /* Set the base address for flash register access */
                hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
        } else {
                /* Can't read flash registers if register set isn't mapped. */
                if (!hw->flash_address) {
                        DEBUGOUT("ERROR: Flash registers not mapped\n");
                        return -E1000_ERR_CONFIG;
                }

                gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);

                /* sector_X_addr is a "sector"-aligned address (4096 bytes)
                 * Add 1 to sector_end_addr since this sector is included in
                 * the overall size.
                 */
                sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
                sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;

                /* flash_base_addr is byte-aligned */
                nvm->flash_base_addr = sector_base_addr
                                       << FLASH_SECTOR_ADDR_SHIFT;

                /* find total size of the NVM, then cut in half since the total
                 * size represents two separate NVM banks.
                 */
                nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
                                        << FLASH_SECTOR_ADDR_SHIFT);
                nvm->flash_bank_size /= 2;
                /* Adjust to word count */
                nvm->flash_bank_size /= sizeof(u16);
        }

        nvm->word_size = E1000_SHADOW_RAM_WORDS;

        /* Clear shadow ram: mark every word unmodified / erased (0xFFFF) */
        for (i = 0; i < nvm->word_size; i++) {
                dev_spec->shadow_ram[i].modified = FALSE;
                dev_spec->shadow_ram[i].value = 0xFFFF;
        }

        /* Function Pointers */
        nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
        nvm->ops.release = e1000_release_nvm_ich8lan;
        if (hw->mac.type >= e1000_pch_spt) {
                nvm->ops.read = e1000_read_nvm_spt;
                nvm->ops.update = e1000_update_nvm_checksum_spt;
        } else {
                nvm->ops.read = e1000_read_nvm_ich8lan;
                nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
        }
        nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
        nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
        nvm->ops.write = e1000_write_nvm_ich8lan;

        return E1000_SUCCESS;
}

/**
 *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific MAC parameters and function
 *  pointers.
 **/
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
{
        struct e1000_mac_info *mac = &hw->mac;

        DEBUGFUNC("e1000_init_mac_params_ich8lan");

        /* Set media type function pointer */
        hw->phy.media_type = e1000_media_type_copper;

        /* Set mta register count */
        mac->mta_reg_count = 32;
        /* Set rar entry count */
        mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
        /* ICH8 reserves the last RAR entry */
        if (mac->type == e1000_ich8lan)
                mac->rar_entry_count--;
        /* Set if part includes ASF firmware */
        mac->asf_firmware_present = TRUE;
        /* FWSM register */
        mac->has_fwsm = TRUE;
        /* ARC subsystem not supported */
        mac->arc_subsystem_valid = FALSE;
        /* Adaptive IFS supported */
        mac->adaptive_ifs = TRUE;

        /* Function pointers */

        /* bus type/speed/width */
        mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
        /* function id */
        mac->ops.set_lan_id = e1000_set_lan_id_single_port;
        /* reset */
        mac->ops.reset_hw = e1000_reset_hw_ich8lan;
        /* hw initialization */
        mac->ops.init_hw = e1000_init_hw_ich8lan;
        /* link setup */
        mac->ops.setup_link = e1000_setup_link_ich8lan;
        /* physical interface setup */
        mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
        /* check for link */
        mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
        /* link info */
        mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
        /* multicast address update */
        mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
        /* clear hardware counters */
        mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;

        /* LED and other operations; newer PCH types intentionally fall
         * through to pick up the common pchlan handlers.
         */
        switch (mac->type) {
        case e1000_ich8lan:
        case e1000_ich9lan:
        case e1000_ich10lan:
                /* check management mode */
                mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
                /* ID LED init */
                mac->ops.id_led_init = e1000_id_led_init_generic;
                /* blink LED */
                mac->ops.blink_led = e1000_blink_led_generic;
                /* setup LED */
                mac->ops.setup_led = e1000_setup_led_generic;
                /* cleanup LED */
                mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
                /* turn on/off LED */
                mac->ops.led_on = e1000_led_on_ich8lan;
                mac->ops.led_off = e1000_led_off_ich8lan;
                break;
        case e1000_pch2lan:
                mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
                mac->ops.rar_set = e1000_rar_set_pch2lan;
                /* fall-through */
        case e1000_pch_lpt:
        case e1000_pch_spt:
        case e1000_pch_cnp:
                /* multicast address update for pch2 */
                mac->ops.update_mc_addr_list =
                    e1000_update_mc_addr_list_pch2lan;
                /* fall-through */
        case e1000_pchlan:
                /* check management mode */
                mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
                /* ID LED init */
                mac->ops.id_led_init = e1000_id_led_init_pchlan;
                /* setup LED */
                mac->ops.setup_led = e1000_setup_led_pchlan;
                /* cleanup LED */
                mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
                /* turn on/off LED */
                mac->ops.led_on = e1000_led_on_pchlan;
                mac->ops.led_off = e1000_led_off_pchlan;
                break;
        default:
                break;
        }

        /* LPT and newer override the RAR/link handlers set above */
        if (mac->type >= e1000_pch_lpt) {
                mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
                mac->ops.rar_set = e1000_rar_set_pch_lpt;
                mac->ops.setup_physical_interface =
                    e1000_setup_copper_link_pch_lpt;
                mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt;
        }

        /* Enable PCS Lock-loss workaround for ICH8 */
        if (mac->type == e1000_ich8lan)
                e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);

        return E1000_SUCCESS;
}

/**
 *  __e1000_access_emi_reg_locked - Read/write EMI register
 *  @hw: pointer to the HW structure
 *  @address: EMI address to program
 *  @data: pointer to value to read/write from/to the EMI address
 *  @read: boolean flag to indicate read or write
 *
 *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
839 **/ 840 static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address, 841 u16 *data, bool read) 842 { 843 s32 ret_val; 844 845 DEBUGFUNC("__e1000_access_emi_reg_locked"); 846 847 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address); 848 if (ret_val) 849 return ret_val; 850 851 if (read) 852 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA, 853 data); 854 else 855 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 856 *data); 857 858 return ret_val; 859 } 860 861 /** 862 * e1000_read_emi_reg_locked - Read Extended Management Interface register 863 * @hw: pointer to the HW structure 864 * @addr: EMI address to program 865 * @data: value to be read from the EMI address 866 * 867 * Assumes the SW/FW/HW Semaphore is already acquired. 868 **/ 869 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data) 870 { 871 DEBUGFUNC("e1000_read_emi_reg_locked"); 872 873 return __e1000_access_emi_reg_locked(hw, addr, data, TRUE); 874 } 875 876 /** 877 * e1000_write_emi_reg_locked - Write Extended Management Interface register 878 * @hw: pointer to the HW structure 879 * @addr: EMI address to program 880 * @data: value to be written to the EMI address 881 * 882 * Assumes the SW/FW/HW Semaphore is already acquired. 883 **/ 884 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data) 885 { 886 DEBUGFUNC("e1000_read_emi_reg_locked"); 887 888 return __e1000_access_emi_reg_locked(hw, addr, &data, FALSE); 889 } 890 891 /** 892 * e1000_set_eee_pchlan - Enable/disable EEE support 893 * @hw: pointer to the HW structure 894 * 895 * Enable/disable EEE based on setting in dev_spec structure, the duplex of 896 * the link and the EEE capabilities of the link partner. The LPI Control 897 * register bits will remain set only if/when link is up. 898 * 899 * EEE LPI must not be asserted earlier than one second after link is up. 
 *  On 82579, EEE LPI should not be enabled until such time otherwise there
 *  can be link issues with some switches.  Other devices can have EEE LPI
 *  enabled immediately upon link up since they have a timer in hardware which
 *  prevents LPI from being asserted too early.
 **/
s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{
        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
        s32 ret_val;
        u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;

        DEBUGFUNC("e1000_set_eee_pchlan");

        /* Select the EMI register offsets for this PHY generation */
        switch (hw->phy.type) {
        case e1000_phy_82579:
                lpa = I82579_EEE_LP_ABILITY;
                pcs_status = I82579_EEE_PCS_STATUS;
                adv_addr = I82579_EEE_ADVERTISEMENT;
                break;
        case e1000_phy_i217:
                lpa = I217_EEE_LP_ABILITY;
                pcs_status = I217_EEE_PCS_STATUS;
                adv_addr = I217_EEE_ADVERTISEMENT;
                break;
        default:
                /* PHY without EEE support: nothing to do */
                return E1000_SUCCESS;
        }

        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val)
                return ret_val;

        ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
        if (ret_val)
                goto release;

        /* Clear bits that enable EEE in various speeds */
        lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;

        /* Enable EEE if not disabled by user */
        if (!dev_spec->eee_disable) {
                /* Save off link partner's EEE ability */
                ret_val = e1000_read_emi_reg_locked(hw, lpa,
                                                    &dev_spec->eee_lp_ability);
                if (ret_val)
                        goto release;

                /* Read EEE advertisement */
                ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
                if (ret_val)
                        goto release;

                /* Enable EEE only for speeds in which the link partner is
                 * EEE capable and for which we advertise EEE.
                 */
                if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
                        lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;

                if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
                        /* NOTE(review): return value of this read is ignored;
                         * on failure 'data' may be stale -- confirm intended.
                         */
                        hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
                        if (data & NWAY_LPAR_100TX_FD_CAPS)
                                lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
                        else
                                /* EEE is not supported in 100Half, so ignore
                                 * partner's EEE in 100 ability if full-duplex
                                 * is not advertised.
                                 */
                                dev_spec->eee_lp_ability &=
                                    ~I82579_EEE_100_SUPPORTED;
                }
        }

        if (hw->phy.type == e1000_phy_82579) {
                ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
                                                    &data);
                if (ret_val)
                        goto release;

                data &= ~I82579_LPI_100_PLL_SHUT;
                ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
                                                     data);
        }

        /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
        ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
        if (ret_val)
                goto release;

        ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
release:
        hw->phy.ops.release(hw);

        return ret_val;
}

/**
 *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 *  @hw: pointer to the HW structure
 *  @link: link up bool flag
 *
 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 *  preventing further DMA write requests.  Workaround the issue by disabling
 *  the de-assertion of the clock request when in 1Gbps mode.
 *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
 *  speeds in order to avoid Tx hangs.
1005 **/ 1006 static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link) 1007 { 1008 u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6); 1009 u32 status = E1000_READ_REG(hw, E1000_STATUS); 1010 s32 ret_val = E1000_SUCCESS; 1011 u16 reg; 1012 1013 if (link && (status & E1000_STATUS_SPEED_1000)) { 1014 ret_val = hw->phy.ops.acquire(hw); 1015 if (ret_val) 1016 return ret_val; 1017 1018 ret_val = 1019 e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, 1020 ®); 1021 if (ret_val) 1022 goto release; 1023 1024 ret_val = 1025 e1000_write_kmrn_reg_locked(hw, 1026 E1000_KMRNCTRLSTA_K1_CONFIG, 1027 reg & 1028 ~E1000_KMRNCTRLSTA_K1_ENABLE); 1029 if (ret_val) 1030 goto release; 1031 1032 usec_delay(10); 1033 1034 E1000_WRITE_REG(hw, E1000_FEXTNVM6, 1035 fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK); 1036 1037 ret_val = 1038 e1000_write_kmrn_reg_locked(hw, 1039 E1000_KMRNCTRLSTA_K1_CONFIG, 1040 reg); 1041 release: 1042 hw->phy.ops.release(hw); 1043 } else { 1044 /* clear FEXTNVM6 bit 8 on link down or 10/100 */ 1045 fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK; 1046 1047 if ((hw->phy.revision > 5) || !link || 1048 ((status & E1000_STATUS_SPEED_100) && 1049 (status & E1000_STATUS_FD))) 1050 goto update_fextnvm6; 1051 1052 ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, ®); 1053 if (ret_val) 1054 return ret_val; 1055 1056 /* Clear link status transmit timeout */ 1057 reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK; 1058 1059 if (status & E1000_STATUS_SPEED_100) { 1060 /* Set inband Tx timeout to 5x10us for 100Half */ 1061 reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; 1062 1063 /* Do not extend the K1 entry latency for 100Half */ 1064 fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; 1065 } else { 1066 /* Set inband Tx timeout to 50x10us for 10Full/Half */ 1067 reg |= 50 << 1068 I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; 1069 1070 /* Extend the K1 entry latency for 10 Mbps */ 1071 fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; 1072 } 1073 1074 
ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg); 1075 if (ret_val) 1076 return ret_val; 1077 1078 update_fextnvm6: 1079 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6); 1080 } 1081 1082 return ret_val; 1083 } 1084 1085 static u64 e1000_ltr2ns(u16 ltr) 1086 { 1087 u32 value, scale; 1088 1089 /* Determine the latency in nsec based on the LTR value & scale */ 1090 value = ltr & E1000_LTRV_VALUE_MASK; 1091 scale = (ltr & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT; 1092 1093 return value * (1 << (scale * E1000_LTRV_SCALE_FACTOR)); 1094 } 1095 1096 /** 1097 * e1000_platform_pm_pch_lpt - Set platform power management values 1098 * @hw: pointer to the HW structure 1099 * @link: bool indicating link status 1100 * 1101 * Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like" 1102 * GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed 1103 * when link is up (which must not exceed the maximum latency supported 1104 * by the platform), otherwise specify there is no LTR requirement. 1105 * Unlike TRUE-PCIe devices which set the LTR maximum snoop/no-snoop 1106 * latencies in the LTR Extended Capability Structure in the PCIe Extended 1107 * Capability register set, on this device LTR is set by writing the 1108 * equivalent snoop/no-snoop latencies in the LTRV register in the MAC and 1109 * set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB) 1110 * message to the PMC. 1111 * 1112 * Use the LTR value to calculate the Optimized Buffer Flush/Fill (OBFF) 1113 * high-water mark. 
1114 **/ 1115 static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) 1116 { 1117 u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) | 1118 link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND; 1119 u16 lat_enc = 0; /* latency encoded */ 1120 s32 obff_hwm = 0; 1121 1122 DEBUGFUNC("e1000_platform_pm_pch_lpt"); 1123 1124 if (link) { 1125 u16 speed, duplex, scale = 0; 1126 u16 max_snoop, max_nosnoop; 1127 u16 max_ltr_enc; /* max LTR latency encoded */ 1128 s64 lat_ns; 1129 s64 value; 1130 u32 rxa; 1131 1132 if (!hw->mac.max_frame_size) { 1133 DEBUGOUT("max_frame_size not set.\n"); 1134 return -E1000_ERR_CONFIG; 1135 } 1136 1137 hw->mac.ops.get_link_up_info(hw, &speed, &duplex); 1138 if (!speed) { 1139 DEBUGOUT("Speed not set.\n"); 1140 return -E1000_ERR_CONFIG; 1141 } 1142 1143 /* Rx Packet Buffer Allocation size (KB) */ 1144 rxa = E1000_READ_REG(hw, E1000_PBA) & E1000_PBA_RXA_MASK; 1145 1146 /* Determine the maximum latency tolerated by the device. 1147 * 1148 * Per the PCIe spec, the tolerated latencies are encoded as 1149 * a 3-bit encoded scale (only 0-5 are valid) multiplied by 1150 * a 10-bit value (0-1023) to provide a range from 1 ns to 1151 * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns, 1152 * 1=2^5ns, 2=2^10ns,...5=2^25ns. 
1153 */ 1154 lat_ns = ((s64)rxa * 1024 - 1155 (2 * (s64)hw->mac.max_frame_size)) * 8 * 1000; 1156 if (lat_ns < 0) 1157 lat_ns = 0; 1158 else 1159 lat_ns /= speed; 1160 value = lat_ns; 1161 1162 while (value > E1000_LTRV_VALUE_MASK) { 1163 scale++; 1164 value = E1000_DIVIDE_ROUND_UP(value, (1 << 5)); 1165 } 1166 if (scale > E1000_LTRV_SCALE_MAX) { 1167 DEBUGOUT1("Invalid LTR latency scale %d\n", scale); 1168 return -E1000_ERR_CONFIG; 1169 } 1170 lat_enc = (u16)((scale << E1000_LTRV_SCALE_SHIFT) | value); 1171 1172 /* Determine the maximum latency tolerated by the platform */ 1173 e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT, &max_snoop); 1174 e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop); 1175 max_ltr_enc = E1000_MAX(max_snoop, max_nosnoop); 1176 1177 if (lat_enc > max_ltr_enc) { 1178 lat_enc = max_ltr_enc; 1179 lat_ns = e1000_ltr2ns(max_ltr_enc); 1180 } 1181 1182 if (lat_ns) { 1183 lat_ns *= speed * 1000; 1184 lat_ns /= 8; 1185 lat_ns /= 1000000000; 1186 obff_hwm = (s32)(rxa - lat_ns); 1187 } 1188 if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) { 1189 DEBUGOUT1("Invalid high water mark %d\n", obff_hwm); 1190 return -E1000_ERR_CONFIG; 1191 } 1192 } 1193 1194 /* Set Snoop and No-Snoop latencies the same */ 1195 reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT); 1196 E1000_WRITE_REG(hw, E1000_LTRV, reg); 1197 1198 /* Set OBFF high water mark */ 1199 reg = E1000_READ_REG(hw, E1000_SVT) & ~E1000_SVT_OFF_HWM_MASK; 1200 reg |= obff_hwm; 1201 E1000_WRITE_REG(hw, E1000_SVT, reg); 1202 1203 /* Enable OBFF */ 1204 reg = E1000_READ_REG(hw, E1000_SVCR); 1205 reg |= E1000_SVCR_OFF_EN; 1206 /* Always unblock interrupts to the CPU even when the system is 1207 * in OBFF mode. This ensures that small round-robin traffic 1208 * (like ping) does not get dropped or experience long latency. 
1209 */ 1210 reg |= E1000_SVCR_OFF_MASKINT; 1211 E1000_WRITE_REG(hw, E1000_SVCR, reg); 1212 1213 return E1000_SUCCESS; 1214 } 1215 1216 /** 1217 * e1000_set_obff_timer_pch_lpt - Update Optimized Buffer Flush/Fill timer 1218 * @hw: pointer to the HW structure 1219 * @itr: interrupt throttling rate 1220 * 1221 * Configure OBFF with the updated interrupt rate. 1222 **/ 1223 static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr) 1224 { 1225 u32 svcr; 1226 s32 timer; 1227 1228 DEBUGFUNC("e1000_set_obff_timer_pch_lpt"); 1229 1230 /* Convert ITR value into microseconds for OBFF timer */ 1231 timer = itr & E1000_ITR_MASK; 1232 timer = (timer * E1000_ITR_MULT) / 1000; 1233 1234 if ((timer < 0) || (timer > E1000_ITR_MASK)) { 1235 DEBUGOUT1("Invalid OBFF timer %d\n", timer); 1236 return -E1000_ERR_CONFIG; 1237 } 1238 1239 svcr = E1000_READ_REG(hw, E1000_SVCR); 1240 svcr &= ~E1000_SVCR_OFF_TIMER_MASK; 1241 svcr |= timer << E1000_SVCR_OFF_TIMER_SHIFT; 1242 E1000_WRITE_REG(hw, E1000_SVCR, svcr); 1243 1244 return E1000_SUCCESS; 1245 } 1246 1247 /** 1248 * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP 1249 * @hw: pointer to the HW structure 1250 * @to_sx: boolean indicating a system power state transition to Sx 1251 * 1252 * When link is down, configure ULP mode to significantly reduce the power 1253 * to the PHY. If on a Manageability Engine (ME) enabled system, tell the 1254 * ME firmware to start the ULP configuration. If not on an ME enabled 1255 * system, configure the ULP mode by software. 
 */
s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
{
	u32 mac_reg;
	s32 ret_val = E1000_SUCCESS;
	u16 phy_reg;
	u16 oem_reg = 0;

	/* ULP only applies to LPT and newer parts, excluding the listed
	 * I217/I218 device IDs; bail out if ULP is already on.
	 */
	if ((hw->mac.type < e1000_pch_lpt) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
		return 0;

	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
		/* Request ME configure ULP mode in the PHY */
		mac_reg = E1000_READ_REG(hw, E1000_H2ME);
		mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
		E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);

		goto out;
	}

	if (!to_sx) {
		int i = 0;

		/* Poll up to 5 seconds for Cable Disconnected indication
		 * (100 iterations x 50 msec).
		 */
		while (!(E1000_READ_REG(hw, E1000_FEXT) &
			 E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
			/* Bail if link is re-acquired */
			if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
				return -E1000_ERR_PHY;

			if (i++ == 100)
				break;

			msec_delay(50);
		}
		DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
			  (E1000_READ_REG(hw, E1000_FEXT) &
			   E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
			  i * 50);
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	/* Force SMBus mode in PHY */
	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

	/* Force SMBus mode in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

	/* Si workaround for ULP entry flow on i127/rev6 h/w.  Enable
	 * LPLU and disable Gig speed when entering ULP
	 * (oem_reg saves the pre-workaround HV_OEM_BITS for restore below).
	 */
	if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
		ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
						       &oem_reg);
		if (ret_val)
			goto release;

		phy_reg = oem_reg;
		phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;

		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
							phy_reg);

		if (ret_val)
			goto release;
	}

	/* Set Inband ULP Exit, Reset to SMBus mode and
	 * Disable SMBus Release on PERST# in PHY
	 */
	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
		    I218_ULP_CONFIG1_DISABLE_SMB_PERST);
	if (to_sx) {
		/* Going to Sx: wake on link change only if WoL-on-link is
		 * armed, and make ULP sticky across the transition.
		 */
		if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
			phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
		else
			phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;

		phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
		phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
	} else {
		phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
		phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
		phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
	}
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Set Disable SMBus Release on PERST# in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
	mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);

	/* Commit ULP changes in PHY by starting auto ULP configuration */
	phy_reg |= I218_ULP_CONFIG1_START;
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Restore the saved OEM bits for i217/rev6 when entering Sx with
	 * link still up (undo the Si workaround applied above).
	 */
	if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
	    to_sx && (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
							oem_reg);
		if (ret_val)
			goto release;
	}

release:
	hw->phy.ops.release(hw);
out:
	if (ret_val)
		DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
	else
		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;

	return ret_val;
}

/**
 * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
 * @hw: pointer to the HW structure
 * @force: boolean indicating whether or not to force disabling ULP
 *
 * Un-configure ULP mode when link is up, the system is transitioned from
 * Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
 * system, poll for an indication from ME that ULP has been un-configured.
 * If not on an ME enabled system, un-configure the ULP mode by software.
 *
 * During nominal operation, this function is called when link is acquired
 * to disable ULP mode (force=FALSE); otherwise, for example when unloading
 * the driver or during Sx->S0 transitions, this is called with force=TRUE
 * to forcibly disable ULP.
 */
s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
{
	s32 ret_val = E1000_SUCCESS;
	u32 mac_reg;
	u16 phy_reg;
	int i = 0;

	/* Same applicability guard as e1000_enable_ulp_lpt_lp; bail out
	 * if ULP is already off.
	 */
	if ((hw->mac.type < e1000_pch_lpt) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
		return 0;

	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
		if (force) {
			/* Request ME un-configure ULP mode in the PHY */
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ULP;
			mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
		}

		/* Poll up to 300msec for ME to clear ULP_CFG_DONE
		 * (30 iterations x 10 msec).
		 */
		while (E1000_READ_REG(hw, E1000_FWSM) &
		       E1000_FWSM_ULP_CFG_DONE) {
			if (i++ == 30) {
				ret_val = -E1000_ERR_PHY;
				goto out;
			}

			msec_delay(10);
		}
		DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);

		if (force) {
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
		} else {
			/* Clear H2ME.ULP after ME ULP configuration */
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ULP;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
		}

		goto out;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	if (force)
		/* Toggle LANPHYPC Value bit */
		e1000_toggle_lanphypc_pch_lpt(hw);

	/* Unforce SMBus mode in PHY */
	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
	if (ret_val) {
		/* The MAC might be in PCIe mode, so temporarily force to
		 * SMBus mode in order to access the PHY.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

		msec_delay(50);

		ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
						       &phy_reg);
		if (ret_val)
			goto release;
	}
	phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

	/* Unforce SMBus mode in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

	/* When ULP mode was previously entered, K1 was disabled by the
	 * hardware.  Re-Enable K1 in the PHY when exiting ULP.
	 */
	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= HV_PM_CTRL_K1_ENABLE;
	e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);

	/* Clear ULP enabled configuration */
	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg &= ~(I218_ULP_CONFIG1_IND |
		     I218_ULP_CONFIG1_STICKY_ULP |
		     I218_ULP_CONFIG1_RESET_TO_SMBUS |
		     I218_ULP_CONFIG1_WOL_HOST |
		     I218_ULP_CONFIG1_INBAND_EXIT |
		     I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
		     I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
		     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Commit ULP changes by starting auto ULP configuration */
	phy_reg |= I218_ULP_CONFIG1_START;
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Clear Disable SMBus Release on PERST# in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
	mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);

release:
	hw->phy.ops.release(hw);
	if (force) {
		/* Full PHY reset plus settle delay when forcibly exiting ULP */
		hw->phy.ops.reset(hw);
		msec_delay(50);
	}
out:
	if (ret_val)
		DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
	else
		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;

	return ret_val;
}



/**
 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
 * @hw: pointer to the HW structure
 *
 * Checks to see of the link status of the hardware has changed.  If a
 * change in link status has been detected, then we read the PHY registers
 * to get the current speed/duplex if link exists.
 **/
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val, tipg_reg = 0;
	u16 emi_addr, emi_val = 0;
	bool link;
	u16 phy_reg;

	DEBUGFUNC("e1000_check_for_copper_link_ich8lan");

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status)
		return E1000_SUCCESS;

	/* First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		return ret_val;

	if (hw->mac.type == e1000_pchlan) {
		ret_val = e1000_k1_gig_workaround_hv(hw, link);
		if (ret_val)
			return ret_val;
	}

	/* When connected at 10Mbps half-duplex, some parts are excessively
	 * aggressive resulting in many collisions.  To avoid this, increase
	 * the IPG and reduce Rx latency in the PHY.
	 */
	if ((hw->mac.type >= e1000_pch2lan) && link) {
		u16 speed, duplex;

		e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
		tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
		tipg_reg &= ~E1000_TIPG_IPGT_MASK;

		if (duplex == HALF_DUPLEX && speed == SPEED_10) {
			tipg_reg |= 0xFF;
			/* Reduce Rx latency in analog PHY */
			emi_val = 0;
		} else if (hw->mac.type >= e1000_pch_spt &&
			   duplex == FULL_DUPLEX && speed != SPEED_1000) {
			tipg_reg |= 0xC;
			emi_val = 1;
		} else {
			/* Roll back the default values */
			tipg_reg |= 0x08;
			emi_val = 1;
		}

		E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;

		/* Rx-config EMI register moved between PCH2 and later parts */
		if (hw->mac.type == e1000_pch2lan)
			emi_addr = I82579_RX_CONFIG;
		else
			emi_addr = I217_RX_CONFIG;
		ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);


		if (hw->mac.type >= e1000_pch_lpt) {
			/* NOTE(review): this inner phy_reg shadows the outer
			 * one; the read_reg_locked results here are not
			 * checked, matching the original behavior.
			 */
			u16 phy_reg;

			hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG,
						    &phy_reg);
			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
			if (speed == SPEED_100 || speed == SPEED_10)
				phy_reg |= 0x3E8;
			else
				phy_reg |= 0xFA;
			hw->phy.ops.write_reg_locked(hw,
						     I217_PLL_CLOCK_GATE_REG,
						     phy_reg);

			if (speed == SPEED_1000) {
				hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
							    &phy_reg);

				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;

				hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
							     phy_reg);
			}
		}
		hw->phy.ops.release(hw);

		if (ret_val)
			return ret_val;

		if (hw->mac.type >= e1000_pch_spt) {
			u16 data;
			u16 ptr_gap;

			if (speed == SPEED_1000) {
				/* Ensure the write pointer gap in PHY_REG
				 * (776, 20) is at least 0x18 at gigabit.
				 */
				ret_val = hw->phy.ops.acquire(hw);
				if (ret_val)
					return ret_val;

				ret_val = hw->phy.ops.read_reg_locked(hw,
							      PHY_REG(776, 20),
							      &data);
				if (ret_val) {
					hw->phy.ops.release(hw);
					return ret_val;
				}

				ptr_gap = (data & (0x3FF << 2)) >> 2;
				if (ptr_gap < 0x18) {
					data &= ~(0x3FF << 2);
					data |= (0x18 << 2);
					ret_val =
					    hw->phy.ops.write_reg_locked(hw,
							PHY_REG(776, 20), data);
				}
				hw->phy.ops.release(hw);
				if (ret_val)
					return ret_val;
			} else {
				ret_val = hw->phy.ops.acquire(hw);
				if (ret_val)
					return ret_val;

				ret_val = hw->phy.ops.write_reg_locked(hw,
							       PHY_REG(776, 20),
							       0xC023);
				hw->phy.ops.release(hw);
				if (ret_val)
					return ret_val;

			}
		}
	}

	/* I217 Packet Loss issue:
	 * ensure that FEXTNVM4 Beacon Duration is set correctly
	 * on power up.
	 * Set the Beacon Duration for I217 to 8 usec
	 */
	if (hw->mac.type >= e1000_pch_lpt) {
		u32 mac_reg;

		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
		mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
		E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
	}

	/* Work-around I218 hang issue */
	if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
		if (ret_val)
			return ret_val;
	}
	if (hw->mac.type >= e1000_pch_lpt) {
		/* Set platform power management values for
		 * Latency Tolerance Reporting (LTR)
		 * Optimized Buffer Flush/Fill (OBFF)
		 */
		ret_val = e1000_platform_pm_pch_lpt(hw, link);
		if (ret_val)
			return ret_val;
	}

	/* Clear link partner's EEE ability */
	hw->dev_spec.ich8lan.eee_lp_ability = 0;

	if (hw->mac.type >= e1000_pch_lpt) {
		u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);

		if (hw->mac.type == e1000_pch_spt) {
			/* FEXTNVM6 K1-off workaround - for SPT only */
			u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);

			if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
				fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
			else
				fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
		}

		/* User-requested override always wins over the SPT default */
		if (hw->dev_spec.ich8lan.disable_k1_off == TRUE)
			fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;

		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
	}

	if (!link)
		return E1000_SUCCESS; /* No link detected */

	mac->get_link_status = FALSE;

	switch (hw->mac.type) {
	case e1000_pch2lan:
		ret_val = e1000_k1_workaround_lv(hw);
		if (ret_val)
			return ret_val;
		/* fall-thru */
	case e1000_pchlan:
		if (hw->phy.type == e1000_phy_82578) {
			ret_val = e1000_link_stall_workaround_hv(hw);
			if (ret_val)
				return ret_val;
		}

		/* Workaround for PCHx parts in half-duplex:
		 * Set the number of preambles removed from the packet
		 * when it is passed from the PHY to the MAC to prevent
		 * the MAC from misinterpreting the packet type.
		 */
		hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;

		if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
		    E1000_STATUS_FD)
			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);

		hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
		break;
	default:
		break;
	}

	/* Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	e1000_check_downshift_generic(hw);

	/* Enable/Disable EEE after link up */
	if (hw->phy.type > e1000_phy_82579) {
		ret_val = e1000_set_eee_pchlan(hw);
		if (ret_val)
			return ret_val;
	}

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg)
		return -E1000_ERR_CONFIG;

	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
	 */
	mac->ops.config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = e1000_config_fc_after_link_up_generic(hw);
	if (ret_val)
		DEBUGOUT("Error configuring flow control\n");

	return ret_val;
}

/**
 * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific function pointers for PHY, MAC, and NVM.
 **/
void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_init_function_pointers_ich8lan");

	hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
	hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
	/* PHY init differs between the ICH8/9/10 and PCH generations */
	switch (hw->mac.type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
		break;
	case e1000_pchlan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pch_cnp:
		hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
		break;
	default:
		break;
	}
}

/**
 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
 * @hw: pointer to the HW structure
 *
 * Acquires the mutex for performing NVM operations.
 **/
static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_acquire_nvm_ich8lan");

	/* No locking needed here; this is a no-op placeholder for the
	 * nvm.ops.acquire function pointer.
	 */
	return E1000_SUCCESS;
}

/**
 * e1000_release_nvm_ich8lan - Release NVM mutex
 * @hw: pointer to the HW structure
 *
 * Releases the mutex used while performing NVM operations.
 **/
static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_release_nvm_ich8lan");

	/* No-op counterpart to e1000_acquire_nvm_ich8lan */
	return;
}

/**
 * e1000_acquire_swflag_ich8lan - Acquire software control flag
 * @hw: pointer to the HW structure
 *
 * Acquires the software control flag for performing PHY and select
 * MAC CSR accesses.
 **/
static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
{
	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_acquire_swflag_ich8lan");

	/* First wait (up to PHY_CFG_TIMEOUT msec) for any current holder
	 * of the SW flag to release it.
	 */
	while (timeout) {
		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
			break;

		msec_delay_irq(1);
		timeout--;
	}

	if (!timeout) {
		DEBUGOUT("SW has already locked the resource.\n");
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	timeout = SW_FLAG_TIMEOUT;

	/* Then request the flag and wait for the hardware to latch it;
	 * FW/HW may refuse, in which case the bit never reads back set.
	 */
	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);

	while (timeout) {
		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
			break;

		msec_delay_irq(1);
		timeout--;
	}

	if (!timeout) {
		DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
			  E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
		/* Withdraw our request so a later attempt starts clean */
		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

out:
	return ret_val;
}

/**
 * e1000_release_swflag_ich8lan - Release software control flag
 * @hw: pointer to the HW structure
 *
 * Releases the software control flag for performing PHY and select
 * MAC CSR accesses.
 **/
static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
{
	u32 extcnf_ctrl;

	DEBUGFUNC("e1000_release_swflag_ich8lan");

	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);

	/* Only clear the flag if we still hold it; warn otherwise since
	 * something else (SW/FW/HW) dropped it out from under us.
	 */
	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
	} else {
		DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
	}

	return;
}

/**
 * e1000_check_mng_mode_ich8lan - Checks management mode
 * @hw: pointer to the HW structure
 *
 * This checks if the adapter has any manageability enabled.
 * This is a function pointer entry point only called by read/write
 * routines for the PHY and NVM parts.
 **/
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
{
	u32 fwsm;

	DEBUGFUNC("e1000_check_mng_mode_ich8lan");

	fwsm = E1000_READ_REG(hw, E1000_FWSM);

	/* Manageability requires valid firmware AND the FWSM mode field
	 * set exactly to iAMT mode.
	 */
	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
	       ((fwsm & E1000_FWSM_MODE_MASK) ==
		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
}

/**
 * e1000_check_mng_mode_pchlan - Checks management mode
 * @hw: pointer to the HW structure
 *
 * This checks if the adapter has iAMT enabled.
 * This is a function pointer entry point only called by read/write
 * routines for the PHY and NVM parts.
 **/
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
{
	u32 fwsm;

	DEBUGFUNC("e1000_check_mng_mode_pchlan");

	fwsm = E1000_READ_REG(hw, E1000_FWSM);

	/* Unlike the ICH8 variant, PCH only tests that the iAMT mode bit
	 * is set within the mode field, not strict equality.
	 */
	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
}

/**
 * e1000_rar_set_pch2lan - Set receive address register
 * @hw: pointer to the HW structure
 * @addr: pointer to the receive address
 * @index: receive address array register
 *
 * Sets the receive address array register at index to the address passed
 * in by addr.  For 82579, RAR[0] is the base address register that is to
 * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
 * Use SHRA[0-3] in place of those reserved for ME.
 **/
static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	DEBUGFUNC("e1000_rar_set_pch2lan");

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] |
		   ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	if (index == 0) {
		/* RAR[0] is always host-owned; flush between the low/high
		 * writes so the address update is not torn.
		 */
		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
		E1000_WRITE_FLUSH(hw);
		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
		E1000_WRITE_FLUSH(hw);
		return E1000_SUCCESS;
	}

	/* RAR[1-6] are owned by manageability.  Skip those and program the
	 * next address into the SHRA register array.
	 */
	if (index < (u32) (hw->mac.rar_entry_count)) {
		s32 ret_val;

		ret_val = e1000_acquire_swflag_ich8lan(hw);
		if (ret_val)
			goto out;

		E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
		E1000_WRITE_FLUSH(hw);
		E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
		E1000_WRITE_FLUSH(hw);

		e1000_release_swflag_ich8lan(hw);

		/* verify the register updates */
		if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
		    (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
			return E1000_SUCCESS;

		/* Readback mismatch: ME may have locked this SHRA entry;
		 * fall through to the common failure path.
		 */
		DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
			  (index - 1), E1000_READ_REG(hw, E1000_FWSM));
	}

out:
	DEBUGOUT1("Failed to write receive address at index %d\n", index);
	return -E1000_ERR_CONFIG;
}

/**
 * e1000_rar_set_pch_lpt - Set receive address registers
 * @hw: pointer to the HW structure
 * @addr: pointer to the receive address
 * @index: receive address array register
 *
 * Sets the receive address register array at index to the address passed
 * in by addr.  For LPT, RAR[0] is the base address register that is to
 * contain the MAC address.  SHRA[0-10] are the shared receive address
 * registers that are shared between the Host and manageability engine (ME).
 **/
static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;
	u32 wlock_mac;

	DEBUGFUNC("e1000_rar_set_pch_lpt");

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	if (index == 0) {
		/* RAR[0] always belongs to the host */
		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
		E1000_WRITE_FLUSH(hw);
		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
		E1000_WRITE_FLUSH(hw);
		return E1000_SUCCESS;
	}

	/* The manageability engine (ME) can lock certain SHRAR registers that
	 * it is using - those registers are unavailable for use.
	 */
	if (index < hw->mac.rar_entry_count) {
		wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
			    E1000_FWSM_WLOCK_MAC_MASK;
		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;

		/* Check if all SHRAR registers are locked */
		if (wlock_mac == 1)
			goto out;

		/* wlock_mac == 0 means no registers are ME-locked; otherwise
		 * only SHRA[0..wlock_mac-1] are writable.
		 */
		if ((wlock_mac == 0) || (index <= wlock_mac)) {
			s32 ret_val;

			ret_val = e1000_acquire_swflag_ich8lan(hw);

			if (ret_val)
				goto out;

			E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
					rar_low);
			E1000_WRITE_FLUSH(hw);
			E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
					rar_high);
			E1000_WRITE_FLUSH(hw);

			e1000_release_swflag_ich8lan(hw);

			/* verify the register updates */
			if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
			    (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
				return E1000_SUCCESS;
		}
	}

out:
	DEBUGOUT1("Failed to write receive address at index %d\n", index);
	return -E1000_ERR_CONFIG;
}

/**
 * e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
 * @hw: pointer to the HW structure
 * @mc_addr_list: array of multicast addresses to program
 * @mc_addr_count: number of multicast addresses to program
 *
 * Updates entire Multicast Table Array of the PCH2 MAC and PHY.
 * The caller must have a packed mc_addr_list of multicast addresses.
 * Errors while mirroring the MTA into the PHY wakeup registers are
 * silently dropped (function returns void).
 **/
static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
					      u8 *mc_addr_list,
					      u32 mc_addr_count)
{
	u16 phy_reg = 0;
	int i;
	s32 ret_val;

	DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");

	/* Program the MAC's MTA first; mta_shadow is updated as a side
	 * effect and is mirrored to the PHY below.
	 */
	e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return;

	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
	if (ret_val)
		goto release;

	/* Copy each 32-bit MTA entry into two 16-bit PHY registers */
	for (i = 0; i < hw->mac.mta_reg_count; i++) {
		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
					   (u16)(hw->mac.mta_shadow[i] &
						 0xFFFF));
		hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
					   (u16)((hw->mac.mta_shadow[i] >> 16) &
						 0xFFFF));
	}

	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);

release:
	hw->phy.ops.release(hw);
}

/**
 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
 * @hw: pointer to the HW structure
 *
 * Checks if firmware is blocking the reset of the PHY.
 * This is a function pointer entry point only called by
 * reset routines.
 **/
static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
{
	u32 fwsm;
	bool blocked = FALSE;
	int i = 0;

	DEBUGFUNC("e1000_check_reset_block_ich8lan");

	/* Poll FWSM up to 30 times (10 ms apart) waiting for firmware to
	 * permit a PHY reset.
	 */
	do {
		fwsm = E1000_READ_REG(hw, E1000_FWSM);
		if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
			blocked = TRUE;
			msec_delay(10);
			continue;
		}
		blocked = FALSE;
	} while (blocked && (i++ < 30));
	return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
}

/**
 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
 * @hw: pointer to the HW structure
 *
 * Copies the SMBus address (and, for i217, the SMBus frequency) from the
 * STRAP register into the PHY's HV_SMB_ADDR register.
 *
 * Assumes semaphore already acquired.
 *
 **/
static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
{
	u16 phy_data;
	u32 strap = E1000_READ_REG(hw, E1000_STRAP);
	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
		   E1000_STRAP_SMT_FREQ_SHIFT;
	s32 ret_val;

	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;

	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
	if (ret_val)
		return ret_val;

	phy_data &= ~HV_SMB_ADDR_MASK;
	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;

	if (hw->phy.type == e1000_phy_i217) {
		/* Restore SMBus frequency */
		if (freq--) {
			/* Split the (freq - 1) value into the low/high
			 * frequency bits of HV_SMB_ADDR.
			 */
			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
			phy_data |= (freq & (1 << 0)) <<
				HV_SMB_ADDR_FREQ_LOW_SHIFT;
			phy_data |= (freq & (1 << 1)) <<
				(HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
		} else {
			DEBUGOUT("Unsupported SMB frequency in PHY\n");
		}
	}

	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
}

/**
 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
 * @hw: pointer to the HW structure
 *
 * SW should configure the LCD from the NVM extended configuration region
 * as a workaround for certain parts.
2258 **/ 2259 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) 2260 { 2261 struct e1000_phy_info *phy = &hw->phy; 2262 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; 2263 s32 ret_val = E1000_SUCCESS; 2264 u16 word_addr, reg_data, reg_addr, phy_page = 0; 2265 2266 DEBUGFUNC("e1000_sw_lcd_config_ich8lan"); 2267 2268 /* Initialize the PHY from the NVM on ICH platforms. This 2269 * is needed due to an issue where the NVM configuration is 2270 * not properly autoloaded after power transitions. 2271 * Therefore, after each PHY reset, we will load the 2272 * configuration data out of the NVM manually. 2273 */ 2274 switch (hw->mac.type) { 2275 case e1000_ich8lan: 2276 if (phy->type != e1000_phy_igp_3) 2277 return ret_val; 2278 2279 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) || 2280 (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) { 2281 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; 2282 break; 2283 } 2284 /* Fall-thru */ 2285 case e1000_pchlan: 2286 case e1000_pch2lan: 2287 case e1000_pch_lpt: 2288 case e1000_pch_spt: 2289 case e1000_pch_cnp: 2290 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; 2291 break; 2292 default: 2293 return ret_val; 2294 } 2295 2296 ret_val = hw->phy.ops.acquire(hw); 2297 if (ret_val) 2298 return ret_val; 2299 2300 data = E1000_READ_REG(hw, E1000_FEXTNVM); 2301 if (!(data & sw_cfg_mask)) 2302 goto release; 2303 2304 /* Make sure HW does not configure LCD from PHY 2305 * extended configuration before SW configuration 2306 */ 2307 data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); 2308 if ((hw->mac.type < e1000_pch2lan) && 2309 (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)) 2310 goto release; 2311 2312 cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE); 2313 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; 2314 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT; 2315 if (!cnf_size) 2316 goto release; 2317 2318 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; 2319 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; 2320 2321 if 
(((hw->mac.type == e1000_pchlan) && 2322 !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) || 2323 (hw->mac.type > e1000_pchlan)) { 2324 /* HW configures the SMBus address and LEDs when the 2325 * OEM and LCD Write Enable bits are set in the NVM. 2326 * When both NVM bits are cleared, SW will configure 2327 * them instead. 2328 */ 2329 ret_val = e1000_write_smbus_addr(hw); 2330 if (ret_val) 2331 goto release; 2332 2333 data = E1000_READ_REG(hw, E1000_LEDCTL); 2334 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG, 2335 (u16)data); 2336 if (ret_val) 2337 goto release; 2338 } 2339 2340 /* Configure LCD from extended configuration region. */ 2341 2342 /* cnf_base_addr is in DWORD */ 2343 word_addr = (u16)(cnf_base_addr << 1); 2344 2345 for (i = 0; i < cnf_size; i++) { 2346 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1, 2347 ®_data); 2348 if (ret_val) 2349 goto release; 2350 2351 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1), 2352 1, ®_addr); 2353 if (ret_val) 2354 goto release; 2355 2356 /* Save off the PHY page for future writes. */ 2357 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) { 2358 phy_page = reg_data; 2359 continue; 2360 } 2361 2362 reg_addr &= PHY_REG_MASK; 2363 reg_addr |= phy_page; 2364 2365 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr, 2366 reg_data); 2367 if (ret_val) 2368 goto release; 2369 } 2370 2371 release: 2372 hw->phy.ops.release(hw); 2373 return ret_val; 2374 } 2375 2376 /** 2377 * e1000_k1_gig_workaround_hv - K1 Si workaround 2378 * @hw: pointer to the HW structure 2379 * @link: link up bool flag 2380 * 2381 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning 2382 * from a lower speed. This workaround disables K1 whenever link is at 1Gig 2383 * If link is down, the function will restore the default K1 setting located 2384 * in the NVM. 
 **/
static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
{
	s32 ret_val = E1000_SUCCESS;
	u16 status_reg = 0;
	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;

	DEBUGFUNC("e1000_k1_gig_workaround_hv");

	/* Workaround applies to PCH (82577/82578) only */
	if (hw->mac.type != e1000_pchlan)
		return E1000_SUCCESS;

	/* Wrap the whole flow with the sw flag */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
	if (link) {
		if (hw->phy.type == e1000_phy_82578) {
			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
							      &status_reg);
			if (ret_val)
				goto release;

			/* Disable K1 only when speed has resolved to 1000 */
			status_reg &= (BM_CS_STATUS_LINK_UP |
				       BM_CS_STATUS_RESOLVED |
				       BM_CS_STATUS_SPEED_MASK);

			if (status_reg == (BM_CS_STATUS_LINK_UP |
					   BM_CS_STATUS_RESOLVED |
					   BM_CS_STATUS_SPEED_1000))
				k1_enable = FALSE;
		}

		if (hw->phy.type == e1000_phy_82577) {
			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
							      &status_reg);
			if (ret_val)
				goto release;

			status_reg &= (HV_M_STATUS_LINK_UP |
				       HV_M_STATUS_AUTONEG_COMPLETE |
				       HV_M_STATUS_SPEED_MASK);

			if (status_reg == (HV_M_STATUS_LINK_UP |
					   HV_M_STATUS_AUTONEG_COMPLETE |
					   HV_M_STATUS_SPEED_1000))
				k1_enable = FALSE;
		}

		/* Link stall fix for link up */
		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
						       0x0100);
		if (ret_val)
			goto release;

	} else {
		/* Link stall fix for link down */
		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
						       0x4100);
		if (ret_val)
			goto release;
	}

	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);

release:
	hw->phy.ops.release(hw);

	return ret_val;
}

/**
 * e1000_configure_k1_ich8lan - Configure K1 power state
 * @hw: pointer to the HW structure
 * @k1_enable: K1 state to configure (TRUE enables K1, FALSE disables it)
 *
 * Configure the K1 power state based on the provided parameter.
 * Assumes semaphore already acquired.
 *
 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
 **/
s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
{
	s32 ret_val;
	u32 ctrl_reg = 0;
	u32 ctrl_ext = 0;
	u32 reg = 0;
	u16 kmrn_reg = 0;

	DEBUGFUNC("e1000_configure_k1_ich8lan");

	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					     &kmrn_reg);
	if (ret_val)
		return ret_val;

	if (k1_enable)
		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;

	ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					      kmrn_reg);
	if (ret_val)
		return ret_val;

	usec_delay(20);
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);

	/* Temporarily force speed with SPD_BYPS so the K1 setting takes
	 * effect, then restore the original CTRL/CTRL_EXT values.
	 */
	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
	reg |= E1000_CTRL_FRCSPD;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);

	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
	E1000_WRITE_FLUSH(hw);
	usec_delay(20);
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);
	usec_delay(20);

	return E1000_SUCCESS;
}

/**
 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
 * @hw: pointer to the HW structure
 * @d0_state: boolean if entering d0 or d3 device state
 *
 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
 * collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
 * in NVM determines whether HW should configure LPLU and Gbe Disable.
 **/
static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
{
	s32 ret_val = 0;
	u32 mac_reg;
	u16 oem_reg;

	DEBUGFUNC("e1000_oem_bits_config_ich8lan");

	if (hw->mac.type < e1000_pchlan)
		return ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* On PCH, HW owns the OEM bits when OEM write enable is set */
	if (hw->mac.type == e1000_pchlan) {
		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
			goto release;
	}

	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
		goto release;

	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);

	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
	if (ret_val)
		goto release;

	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);

	/* Mirror the MAC's PHY_CTRL GbE-disable/LPLU settings into the
	 * PHY's OEM bits for the requested power state.
	 */
	if (d0_state) {
		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
			oem_reg |= HV_OEM_BITS_GBE_DIS;

		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
			oem_reg |= HV_OEM_BITS_LPLU;
	} else {
		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
			       E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
			oem_reg |= HV_OEM_BITS_GBE_DIS;

		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
			       E1000_PHY_CTRL_NOND0A_LPLU))
			oem_reg |= HV_OEM_BITS_LPLU;
	}

	/* Set Restart auto-neg to activate the bits */
	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
	    !hw->phy.ops.check_reset_block(hw))
		oem_reg |= HV_OEM_BITS_RESTART_AN;

	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);

release:
	hw->phy.ops.release(hw);

	return ret_val;
}


/**
 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
 * @hw: pointer to the HW structure
 *
 * Sets the HV_KMRN_MDIO_SLOW bit in HV_KMRN_MODE_CTRL.
 **/
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 data;

	DEBUGFUNC("e1000_set_mdio_slow_mode_hv");

	ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
	if (ret_val)
		return ret_val;

	data |= HV_KMRN_MDIO_SLOW;

	ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);

	return ret_val;
}

/**
 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
 * done after every PHY reset.
 * @hw: pointer to the HW structure
 **/
static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 phy_data;

	DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");

	if (hw->mac.type != e1000_pchlan)
		return E1000_SUCCESS;

	/* Set MDIO slow mode before any other MDIO access */
	if (hw->phy.type == e1000_phy_82577) {
		ret_val = e1000_set_mdio_slow_mode_hv(hw);
		if (ret_val)
			return ret_val;
	}

	if (((hw->phy.type == e1000_phy_82577) &&
	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
		/* Disable generation of early preamble */
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
		if (ret_val)
			return ret_val;

		/* Preamble tuning for SSC */
		ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
						0xA204);
		if (ret_val)
			return ret_val;
	}

	if (hw->phy.type == e1000_phy_82578) {
		/* Return registers to default by doing a soft reset then
		 * writing 0x3140 to the control register.
		 */
		if (hw->phy.revision < 2) {
			e1000_phy_sw_reset_generic(hw);
			ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
							0x3140);
		}
	}

	/* Select page 0 */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	hw->phy.addr = 1;
	ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
	hw->phy.ops.release(hw);
	if (ret_val)
		return ret_val;

	/* Configure the K1 Si workaround during phy reset assuming there is
	 * link so that it disables K1 if link is in 1Gbps.
	 */
	ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
	if (ret_val)
		return ret_val;

	/* Workaround for link disconnects on a busy hub in half duplex */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;
	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
	if (ret_val)
		goto release;
	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
					       phy_data & 0x00FF);
	if (ret_val)
		goto release;

	/* set MSE higher to enable link to stay up when noise is high */
	ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
release:
	hw->phy.ops.release(hw);

	return ret_val;
}

/**
 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
 * @hw: pointer to the HW structure
 *
 * Mirrors all RAL/RAH (including SHRA) entries into the PHY's BM_RAR
 * wakeup registers.  Errors are silently dropped (function returns void).
 **/
void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
{
	u32 mac_reg;
	u16 i, phy_reg = 0;
	s32 ret_val;

	DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return;
	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
	if (ret_val)
		goto release;

	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
	for (i = 0; i < (hw->mac.rar_entry_count); i++) {
		mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
					   (u16)(mac_reg & 0xFFFF));
		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
					   (u16)((mac_reg >> 16) & 0xFFFF));

		mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
					   (u16)(mac_reg & 0xFFFF));
		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
					   (u16)((mac_reg & E1000_RAH_AV)
						 >> 16));
	}

	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);

release:
	hw->phy.ops.release(hw);
}

/**
 * e1000_calc_rx_da_crc - Calculate 802.3 CRC-32 over a destination address
 * @mac: 6-byte MAC address
 *
 * Bitwise CRC-32 (reflected polynomial 0xEDB88320) over the 6 address
 * bytes, returned inverted per 802.3 convention.
 **/
static u32 e1000_calc_rx_da_crc(u8 mac[])
{
	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
	u32 i, j, mask, crc;

	DEBUGFUNC("e1000_calc_rx_da_crc");

	crc = 0xffffffff;
	for (i = 0; i < 6; i++) {
		crc = crc ^ mac[i];
		for (j = 8; j > 0; j--) {
			/* mask is all-ones when the low CRC bit is set */
			mask = (crc & 1) * (-1);
			crc = (crc >> 1) ^ (poly & mask);
		}
	}
	return ~crc;
}

/**
 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
 * with 82579 PHY
 * @hw: pointer to the HW structure
 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
 **/
s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
{
	s32 ret_val = E1000_SUCCESS;
	u16 phy_reg, data;
	u32 mac_reg;
	u16 i;

	DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");

	if (hw->mac.type < e1000_pch2lan)
		return E1000_SUCCESS;

	/* disable Rx path while enabling/disabling workaround */
	hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
					phy_reg | (1 << 14));
	if (ret_val)
		return ret_val;

	if (enable) {
		/* Write Rx addresses (rar_entry_count for RAL/H, and
		 * SHRAL/H) and initial CRC values to the MAC
		 */
		for (i = 0; i < hw->mac.rar_entry_count; i++) {
			u8 mac_addr[ETH_ADDR_LEN] = {0};
			u32 addr_high, addr_low;

			addr_high = E1000_READ_REG(hw, E1000_RAH(i));
			if (!(addr_high & E1000_RAH_AV))
				continue;
			addr_low = E1000_READ_REG(hw, E1000_RAL(i));
			mac_addr[0] = (addr_low & 0xFF);
			mac_addr[1] = ((addr_low >> 8) & 0xFF);
			mac_addr[2] = ((addr_low >> 16) & 0xFF);
			mac_addr[3] = ((addr_low >> 24) & 0xFF);
			mac_addr[4] = (addr_high & 0xFF);
			mac_addr[5] = ((addr_high >> 8) & 0xFF);

			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
					e1000_calc_rx_da_crc(mac_addr));
		}

		/* Write Rx addresses to the PHY */
		e1000_copy_rx_addrs_to_phy_ich8lan(hw);

		/* Enable jumbo frame workaround in the MAC */
		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
		mac_reg &= ~(1 << 14);
		mac_reg |= (7 << 15);
		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);

		/* Strip CRC on receive since the PHY recomputes it */
		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
		mac_reg |= E1000_RCTL_SECRC;
		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);

		ret_val = e1000_read_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						&data);
		if (ret_val)
			return ret_val;
		ret_val = e1000_write_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						data | (1 << 0));
		if (ret_val)
			return ret_val;
		ret_val = e1000_read_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						&data);
		if (ret_val)
			return ret_val;
		data &= ~(0xF << 8);
		data |= (0xB << 8);
		ret_val = e1000_write_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						data);
		if (ret_val)
			return ret_val;

		/* Enable jumbo frame workaround in the PHY */
		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
		data &= ~(0x7F << 5);
		data |= (0x37 << 5);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
		if (ret_val)
			return ret_val;
		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
		data &= ~(1 << 13);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
		if (ret_val)
			return ret_val;
		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
		data &= ~(0x3FF << 2);
		data |= (E1000_TX_PTR_GAP << 2);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
		if (ret_val)
			return ret_val;
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
		if (ret_val)
			return ret_val;
		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
						(1 << 10));
		if (ret_val)
			return ret_val;
	} else {
		/* Write MAC register values back to h/w defaults */
		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
		mac_reg &= ~(0xF << 14);
		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);

		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
		mac_reg &= ~E1000_RCTL_SECRC;
		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);

		ret_val = e1000_read_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						&data);
		if (ret_val)
			return ret_val;
		ret_val = e1000_write_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						data & ~(1 << 0));
		if (ret_val)
			return ret_val;
		ret_val = e1000_read_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						&data);
		if (ret_val)
			return ret_val;
		data &= ~(0xF << 8);
		data |= (0xB << 8);
		ret_val = e1000_write_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						data);
		if (ret_val)
			return ret_val;

		/* Write PHY register values back to h/w defaults */
		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
		data &= ~(0x7F << 5);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
		if (ret_val)
			return ret_val;
		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
		data |= (1 << 13);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
		if (ret_val)
			return ret_val;
		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
		data &= ~(0x3FF << 2);
		data |= (0x8 << 2);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
		if (ret_val)
			return ret_val;
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
		if (ret_val)
			return ret_val;
		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
						~(1 << 10));
		if (ret_val)
			return ret_val;
	}

	/* re-enable Rx path after enabling/disabling workaround */
	return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
				     ~(1 << 14));
}

/**
 * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
 * done after every PHY reset.
 * @hw: pointer to the HW structure
 **/
static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");

	if (hw->mac.type != e1000_pch2lan)
		return E1000_SUCCESS;

	/* Set MDIO slow mode before any other MDIO access */
	ret_val = e1000_set_mdio_slow_mode_hv(hw);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;
	/* set MSE higher to enable link to stay up when noise is high */
	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
	if (ret_val)
		goto release;
	/* drop link after 5 times MSE threshold was reached */
	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
release:
	hw->phy.ops.release(hw);

	return ret_val;
}

/**
 * e1000_k1_workaround_lv - K1 Si workaround
 * @hw: pointer to the HW structure
 *
 * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
 * Disable K1 for 1000 and 100 speeds
 **/
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 status_reg = 0;

	DEBUGFUNC("e1000_k1_workaround_lv");

	if (hw->mac.type != e1000_pch2lan)
		return E1000_SUCCESS;

	/* Set K1 beacon duration based on 10Mbs speed */
	ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
	if (ret_val)
		return ret_val;

	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
		if (status_reg &
		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			u16 pm_phy_reg;

			/* LV 1G/100 Packet drop issue wa  */
			ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
						       &pm_phy_reg);
			if (ret_val)
				return ret_val;
			pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
			ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
							pm_phy_reg);
			if (ret_val)
				return ret_val;
		} else {
			/* 10Mbps: lengthen the K1 beacon duration instead */
			u32 mac_reg;
			mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
			mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
			E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
		}
	}

	return ret_val;
}

/**
 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
 * @hw: pointer to the HW structure
 * @gate: boolean set to TRUE to gate, FALSE to ungate
 *
 * Gate/ungate the automatic PHY configuration via hardware; perform
 * the configuration via software instead.
3015 **/ 3016 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate) 3017 { 3018 u32 extcnf_ctrl; 3019 3020 DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan"); 3021 3022 if (hw->mac.type < e1000_pch2lan) 3023 return; 3024 3025 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); 3026 3027 if (gate) 3028 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; 3029 else 3030 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG; 3031 3032 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); 3033 } 3034 3035 /** 3036 * e1000_lan_init_done_ich8lan - Check for PHY config completion 3037 * @hw: pointer to the HW structure 3038 * 3039 * Check the appropriate indication the MAC has finished configuring the 3040 * PHY after a software reset. 3041 **/ 3042 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw) 3043 { 3044 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT; 3045 3046 DEBUGFUNC("e1000_lan_init_done_ich8lan"); 3047 3048 /* Wait for basic configuration completes before proceeding */ 3049 do { 3050 data = E1000_READ_REG(hw, E1000_STATUS); 3051 data &= E1000_STATUS_LAN_INIT_DONE; 3052 usec_delay(100); 3053 } while ((!data) && --loop); 3054 3055 /* If basic configuration is incomplete before the above loop 3056 * count reaches 0, loading the configuration from NVM will 3057 * leave the PHY in a bad state possibly resulting in no link. 
3058 */ 3059 if (loop == 0) 3060 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n"); 3061 3062 /* Clear the Init Done bit for the next init event */ 3063 data = E1000_READ_REG(hw, E1000_STATUS); 3064 data &= ~E1000_STATUS_LAN_INIT_DONE; 3065 E1000_WRITE_REG(hw, E1000_STATUS, data); 3066 } 3067 3068 /** 3069 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset 3070 * @hw: pointer to the HW structure 3071 **/ 3072 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) 3073 { 3074 s32 ret_val = E1000_SUCCESS; 3075 u16 reg; 3076 3077 DEBUGFUNC("e1000_post_phy_reset_ich8lan"); 3078 3079 if (hw->phy.ops.check_reset_block(hw)) 3080 return E1000_SUCCESS; 3081 3082 /* Allow time for h/w to get to quiescent state after reset */ 3083 msec_delay(10); 3084 3085 /* Perform any necessary post-reset workarounds */ 3086 switch (hw->mac.type) { 3087 case e1000_pchlan: 3088 ret_val = e1000_hv_phy_workarounds_ich8lan(hw); 3089 if (ret_val) 3090 return ret_val; 3091 break; 3092 case e1000_pch2lan: 3093 ret_val = e1000_lv_phy_workarounds_ich8lan(hw); 3094 if (ret_val) 3095 return ret_val; 3096 break; 3097 default: 3098 break; 3099 } 3100 3101 /* Clear the host wakeup bit after lcd reset */ 3102 if (hw->mac.type >= e1000_pchlan) { 3103 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, ®); 3104 reg &= ~BM_WUC_HOST_WU_BIT; 3105 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg); 3106 } 3107 3108 /* Configure the LCD with the extended configuration region in NVM */ 3109 ret_val = e1000_sw_lcd_config_ich8lan(hw); 3110 if (ret_val) 3111 return ret_val; 3112 3113 /* Configure the LCD with the OEM bits in NVM */ 3114 ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE); 3115 3116 if (hw->mac.type == e1000_pch2lan) { 3117 /* Ungate automatic PHY configuration on non-managed 82579 */ 3118 if (!(E1000_READ_REG(hw, E1000_FWSM) & 3119 E1000_ICH_FWSM_FW_VALID)) { 3120 msec_delay(10); 3121 e1000_gate_hw_phy_config_ich8lan(hw, FALSE); 3122 } 3123 3124 /* Set EEE LPI Update Timer to 
200usec */ 3125 ret_val = hw->phy.ops.acquire(hw); 3126 if (ret_val) 3127 return ret_val; 3128 ret_val = e1000_write_emi_reg_locked(hw, 3129 I82579_LPI_UPDATE_TIMER, 3130 0x1387); 3131 hw->phy.ops.release(hw); 3132 } 3133 3134 return ret_val; 3135 } 3136 3137 /** 3138 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset 3139 * @hw: pointer to the HW structure 3140 * 3141 * Resets the PHY 3142 * This is a function pointer entry point called by drivers 3143 * or other shared routines. 3144 **/ 3145 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) 3146 { 3147 s32 ret_val = E1000_SUCCESS; 3148 3149 DEBUGFUNC("e1000_phy_hw_reset_ich8lan"); 3150 3151 /* Gate automatic PHY configuration by hardware on non-managed 82579 */ 3152 if ((hw->mac.type == e1000_pch2lan) && 3153 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID)) 3154 e1000_gate_hw_phy_config_ich8lan(hw, TRUE); 3155 3156 ret_val = e1000_phy_hw_reset_generic(hw); 3157 if (ret_val) 3158 return ret_val; 3159 3160 return e1000_post_phy_reset_ich8lan(hw); 3161 } 3162 3163 /** 3164 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state 3165 * @hw: pointer to the HW structure 3166 * @active: TRUE to enable LPLU, FALSE to disable 3167 * 3168 * Sets the LPLU state according to the active flag. For PCH, if OEM write 3169 * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set 3170 * the phy speed. This function will manually set the LPLU bit and restart 3171 * auto-neg as hw would do. D3 and D0 LPLU will call the same function 3172 * since it configures the same bit. 
 **/
static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
{
    s32 ret_val;
    u16 oem_reg;

    DEBUGFUNC("e1000_set_lplu_state_pchlan");
    ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
    if (ret_val)
        return ret_val;

    if (active)
        oem_reg |= HV_OEM_BITS_LPLU;
    else
        oem_reg &= ~HV_OEM_BITS_LPLU;

    /* Restart auto-negotiation so the new LPLU setting takes effect,
     * but only when the PHY is not reset-blocked.
     */
    if (!hw->phy.ops.check_reset_block(hw))
        oem_reg |= HV_OEM_BITS_RESTART_AN;

    return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
}

/**
 *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
 *  @hw: pointer to the HW structure
 *  @active: TRUE to enable LPLU, FALSE to disable
 *
 *  Sets the LPLU D0 state according to the active flag.  When
 *  activating LPLU this function also disables smart speed
 *  and vice versa.  LPLU will not be activated unless the
 *  device autonegotiation advertisement meets standards of
 *  either 10 or 10/100 or 10/100/1000 at all duplexes.
 *  This is a function pointer entry point only called by
 *  PHY setup routines.
 **/
static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
{
    struct e1000_phy_info *phy = &hw->phy;
    u32 phy_ctrl;
    s32 ret_val = E1000_SUCCESS;
    u16 data;

    DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");

    /* Nothing to do for IFE PHYs */
    if (phy->type == e1000_phy_ife)
        return E1000_SUCCESS;

    phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);

    if (active) {
        phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
        E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

        /* SmartSpeed adjustment below only applies to igp_3 PHYs */
        if (phy->type != e1000_phy_igp_3)
            return E1000_SUCCESS;

        /* Call gig speed drop workaround on LPLU before accessing
         * any PHY registers
         */
        if (hw->mac.type == e1000_ich8lan)
            e1000_gig_downshift_workaround_ich8lan(hw);

        /* When LPLU is enabled, we should disable SmartSpeed */
        ret_val = phy->ops.read_reg(hw,
                                    IGP01E1000_PHY_PORT_CONFIG,
                                    &data);
        if (ret_val)
            return ret_val;
        data &= ~IGP01E1000_PSCFR_SMART_SPEED;
        ret_val = phy->ops.write_reg(hw,
                                     IGP01E1000_PHY_PORT_CONFIG,
                                     data);
        if (ret_val)
            return ret_val;
    } else {
        phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
        E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

        if (phy->type != e1000_phy_igp_3)
            return E1000_SUCCESS;

        /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
         * during Dx states where the power conservation is most
         * important.  During driver activity we should enable
         * SmartSpeed, so performance is maintained.
         */
        if (phy->smart_speed == e1000_smart_speed_on) {
            ret_val = phy->ops.read_reg(hw,
                                        IGP01E1000_PHY_PORT_CONFIG,
                                        &data);
            if (ret_val)
                return ret_val;

            data |= IGP01E1000_PSCFR_SMART_SPEED;
            ret_val = phy->ops.write_reg(hw,
                                         IGP01E1000_PHY_PORT_CONFIG,
                                         data);
            if (ret_val)
                return ret_val;
        } else if (phy->smart_speed == e1000_smart_speed_off) {
            ret_val = phy->ops.read_reg(hw,
                                        IGP01E1000_PHY_PORT_CONFIG,
                                        &data);
            if (ret_val)
                return ret_val;

            data &= ~IGP01E1000_PSCFR_SMART_SPEED;
            ret_val = phy->ops.write_reg(hw,
                                         IGP01E1000_PHY_PORT_CONFIG,
                                         data);
            if (ret_val)
                return ret_val;
        }
    }

    return E1000_SUCCESS;
}

/**
 *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
 *  @hw: pointer to the HW structure
 *  @active: TRUE to enable LPLU, FALSE to disable
 *
 *  Sets the LPLU D3 state according to the active flag.  When
 *  activating LPLU this function also disables smart speed
 *  and vice versa.  LPLU will not be activated unless the
 *  device autonegotiation advertisement meets standards of
 *  either 10 or 10/100 or 10/100/1000 at all duplexes.
 *  This is a function pointer entry point only called by
 *  PHY setup routines.
 **/
static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
{
    struct e1000_phy_info *phy = &hw->phy;
    u32 phy_ctrl;
    s32 ret_val = E1000_SUCCESS;
    u16 data;

    DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");

    phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);

    if (!active) {
        phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
        E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

        /* SmartSpeed adjustment below only applies to igp_3 PHYs */
        if (phy->type != e1000_phy_igp_3)
            return E1000_SUCCESS;

        /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
         * during Dx states where the power conservation is most
         * important.  During driver activity we should enable
         * SmartSpeed, so performance is maintained.
         */
        if (phy->smart_speed == e1000_smart_speed_on) {
            ret_val = phy->ops.read_reg(hw,
                                        IGP01E1000_PHY_PORT_CONFIG,
                                        &data);
            if (ret_val)
                return ret_val;

            data |= IGP01E1000_PSCFR_SMART_SPEED;
            ret_val = phy->ops.write_reg(hw,
                                         IGP01E1000_PHY_PORT_CONFIG,
                                         data);
            if (ret_val)
                return ret_val;
        } else if (phy->smart_speed == e1000_smart_speed_off) {
            ret_val = phy->ops.read_reg(hw,
                                        IGP01E1000_PHY_PORT_CONFIG,
                                        &data);
            if (ret_val)
                return ret_val;

            data &= ~IGP01E1000_PSCFR_SMART_SPEED;
            ret_val = phy->ops.write_reg(hw,
                                         IGP01E1000_PHY_PORT_CONFIG,
                                         data);
            if (ret_val)
                return ret_val;
        }
        /* Only enable LPLU when the advertisement is 10-only or
         * excludes gigabit (see function header)
         */
    } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
               (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
               (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
        phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
        E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

        if (phy->type != e1000_phy_igp_3)
            return E1000_SUCCESS;

        /* Call gig speed drop workaround on LPLU before accessing
         * any PHY registers
         */
        if (hw->mac.type == e1000_ich8lan)
            e1000_gig_downshift_workaround_ich8lan(hw);

        /* When LPLU is enabled, we should disable SmartSpeed */
        ret_val = phy->ops.read_reg(hw,
                                    IGP01E1000_PHY_PORT_CONFIG,
                                    &data);
        if (ret_val)
            return ret_val;

        data &= ~IGP01E1000_PSCFR_SMART_SPEED;
        ret_val = phy->ops.write_reg(hw,
                                     IGP01E1000_PHY_PORT_CONFIG,
                                     data);
    }

    return ret_val;
}

/**
 *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
 *  @hw: pointer to the HW structure
 *  @bank: pointer to the variable that returns the active bank
* 3390 * Reads signature byte from the NVM using the flash access registers. 3391 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank. 3392 **/ 3393 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) 3394 { 3395 u32 eecd; 3396 struct e1000_nvm_info *nvm = &hw->nvm; 3397 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16); 3398 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1; 3399 u32 nvm_dword = 0; 3400 u8 sig_byte = 0; 3401 s32 ret_val; 3402 3403 DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan"); 3404 3405 switch (hw->mac.type) { 3406 case e1000_pch_spt: 3407 case e1000_pch_cnp: 3408 bank1_offset = nvm->flash_bank_size; 3409 act_offset = E1000_ICH_NVM_SIG_WORD; 3410 3411 /* set bank to 0 in case flash read fails */ 3412 *bank = 0; 3413 3414 /* Check bank 0 */ 3415 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, 3416 &nvm_dword); 3417 if (ret_val) 3418 return ret_val; 3419 sig_byte = (u8)((nvm_dword & 0xFF00) >> 8); 3420 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == 3421 E1000_ICH_NVM_SIG_VALUE) { 3422 *bank = 0; 3423 return E1000_SUCCESS; 3424 } 3425 3426 /* Check bank 1 */ 3427 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset + 3428 bank1_offset, 3429 &nvm_dword); 3430 if (ret_val) 3431 return ret_val; 3432 sig_byte = (u8)((nvm_dword & 0xFF00) >> 8); 3433 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == 3434 E1000_ICH_NVM_SIG_VALUE) { 3435 *bank = 1; 3436 return E1000_SUCCESS; 3437 } 3438 3439 DEBUGOUT("ERROR: No valid NVM bank present\n"); 3440 return -E1000_ERR_NVM; 3441 case e1000_ich8lan: 3442 case e1000_ich9lan: 3443 eecd = E1000_READ_REG(hw, E1000_EECD); 3444 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) == 3445 E1000_EECD_SEC1VAL_VALID_MASK) { 3446 if (eecd & E1000_EECD_SEC1VAL) 3447 *bank = 1; 3448 else 3449 *bank = 0; 3450 3451 return E1000_SUCCESS; 3452 } 3453 DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n"); 3454 /* fall-thru */ 3455 default: 3456 /* set 
bank to 0 in case flash read fails */ 3457 *bank = 0; 3458 3459 /* Check bank 0 */ 3460 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset, 3461 &sig_byte); 3462 if (ret_val) 3463 return ret_val; 3464 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == 3465 E1000_ICH_NVM_SIG_VALUE) { 3466 *bank = 0; 3467 return E1000_SUCCESS; 3468 } 3469 3470 /* Check bank 1 */ 3471 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset + 3472 bank1_offset, 3473 &sig_byte); 3474 if (ret_val) 3475 return ret_val; 3476 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == 3477 E1000_ICH_NVM_SIG_VALUE) { 3478 *bank = 1; 3479 return E1000_SUCCESS; 3480 } 3481 3482 DEBUGOUT("ERROR: No valid NVM bank present\n"); 3483 return -E1000_ERR_NVM; 3484 } 3485 } 3486 3487 /** 3488 * e1000_read_nvm_spt - NVM access for SPT 3489 * @hw: pointer to the HW structure 3490 * @offset: The offset (in bytes) of the word(s) to read. 3491 * @words: Size of data to read in words. 3492 * @data: pointer to the word(s) to read at offset. 3493 * 3494 * Reads a word(s) from the NVM 3495 **/ 3496 static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words, 3497 u16 *data) 3498 { 3499 struct e1000_nvm_info *nvm = &hw->nvm; 3500 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 3501 u32 act_offset; 3502 s32 ret_val = E1000_SUCCESS; 3503 u32 bank = 0; 3504 u32 dword = 0; 3505 u16 offset_to_read; 3506 u16 i; 3507 3508 DEBUGFUNC("e1000_read_nvm_spt"); 3509 3510 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || 3511 (words == 0)) { 3512 DEBUGOUT("nvm parameter(s) out of bounds\n"); 3513 ret_val = -E1000_ERR_NVM; 3514 goto out; 3515 } 3516 3517 nvm->ops.acquire(hw); 3518 3519 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); 3520 if (ret_val != E1000_SUCCESS) { 3521 DEBUGOUT("Could not detect valid bank, assuming bank 0\n"); 3522 bank = 0; 3523 } 3524 3525 act_offset = (bank) ? 
nvm->flash_bank_size : 0; 3526 act_offset += offset; 3527 3528 ret_val = E1000_SUCCESS; 3529 3530 for (i = 0; i < words; i += 2) { 3531 if (words - i == 1) { 3532 if (dev_spec->shadow_ram[offset+i].modified) { 3533 data[i] = dev_spec->shadow_ram[offset+i].value; 3534 } else { 3535 offset_to_read = act_offset + i - 3536 ((act_offset + i) % 2); 3537 ret_val = 3538 e1000_read_flash_dword_ich8lan(hw, 3539 offset_to_read, 3540 &dword); 3541 if (ret_val) 3542 break; 3543 if ((act_offset + i) % 2 == 0) 3544 data[i] = (u16)(dword & 0xFFFF); 3545 else 3546 data[i] = (u16)((dword >> 16) & 0xFFFF); 3547 } 3548 } else { 3549 offset_to_read = act_offset + i; 3550 if (!(dev_spec->shadow_ram[offset+i].modified) || 3551 !(dev_spec->shadow_ram[offset+i+1].modified)) { 3552 ret_val = 3553 e1000_read_flash_dword_ich8lan(hw, 3554 offset_to_read, 3555 &dword); 3556 if (ret_val) 3557 break; 3558 } 3559 if (dev_spec->shadow_ram[offset+i].modified) 3560 data[i] = dev_spec->shadow_ram[offset+i].value; 3561 else 3562 data[i] = (u16) (dword & 0xFFFF); 3563 if (dev_spec->shadow_ram[offset+i].modified) 3564 data[i+1] = 3565 dev_spec->shadow_ram[offset+i+1].value; 3566 else 3567 data[i+1] = (u16) (dword >> 16 & 0xFFFF); 3568 } 3569 } 3570 3571 nvm->ops.release(hw); 3572 3573 out: 3574 if (ret_val) 3575 DEBUGOUT1("NVM read error: %d\n", ret_val); 3576 3577 return ret_val; 3578 } 3579 3580 /** 3581 * e1000_read_nvm_ich8lan - Read word(s) from the NVM 3582 * @hw: pointer to the HW structure 3583 * @offset: The offset (in bytes) of the word(s) to read. 3584 * @words: Size of data to read in words 3585 * @data: Pointer to the word(s) to read at offset. 3586 * 3587 * Reads a word(s) from the NVM using the flash access registers. 
 **/
static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
                                  u16 *data)
{
    struct e1000_nvm_info *nvm = &hw->nvm;
    struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
    u32 act_offset;
    s32 ret_val = E1000_SUCCESS;
    u32 bank = 0;
    u16 i, word;

    DEBUGFUNC("e1000_read_nvm_ich8lan");

    if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
        (words == 0)) {
        DEBUGOUT("nvm parameter(s) out of bounds\n");
        ret_val = -E1000_ERR_NVM;
        goto out;
    }

    nvm->ops.acquire(hw);

    ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
    if (ret_val != E1000_SUCCESS) {
        DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
        bank = 0;
    }

    /* Bank 1 starts flash_bank_size words into the flash */
    act_offset = (bank) ? nvm->flash_bank_size : 0;
    act_offset += offset;

    ret_val = E1000_SUCCESS;
    for (i = 0; i < words; i++) {
        /* Words staged in the shadow RAM (by e1000_write_nvm_ich8lan)
         * take precedence over the flash contents.
         */
        if (dev_spec->shadow_ram[offset+i].modified) {
            data[i] = dev_spec->shadow_ram[offset+i].value;
        } else {
            ret_val = e1000_read_flash_word_ich8lan(hw,
                                                    act_offset + i,
                                                    &word);
            if (ret_val)
                break;
            data[i] = word;
        }
    }

    nvm->ops.release(hw);

out:
    if (ret_val)
        DEBUGOUT1("NVM read error: %d\n", ret_val);

    return ret_val;
}

/**
 *  e1000_flash_cycle_init_ich8lan - Initialize flash
 *  @hw: pointer to the HW structure
 *
 *  This function does initial flash setup so that a new read/write/erase cycle
 *  can be started.
 **/
static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
{
    union ich8_hws_flash_status hsfsts;
    s32 ret_val = -E1000_ERR_NVM;

    DEBUGFUNC("e1000_flash_cycle_init_ich8lan");

    hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);

    /* Check if the flash descriptor is valid */
    if (!hsfsts.hsf_status.fldesvalid) {
        DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.\n");
        return -E1000_ERR_NVM;
    }

    /* Clear FCERR and DAEL in hw status by writing 1 */
    hsfsts.hsf_status.flcerr = 1;
    hsfsts.hsf_status.dael = 1;
    /* On SPT and later only 32-bit register access is supported, so the
     * 16-bit status value is written through the 32-bit accessor.
     */
    if (hw->mac.type >= e1000_pch_spt)
        E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
                              hsfsts.regval & 0xFFFF);
    else
        E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);

    /* Either we should have a hardware SPI cycle in progress
     * bit to check against, in order to start a new cycle or
     * FDONE bit should be changed in the hardware so that it
     * is 1 after hardware reset, which can then be used as an
     * indication whether a cycle is in progress or has been
     * completed.
     */

    if (!hsfsts.hsf_status.flcinprog) {
        /* There is no cycle running at present,
         * so we can start a cycle.
         * Begin by setting Flash Cycle Done.
         */
        hsfsts.hsf_status.flcdone = 1;
        if (hw->mac.type >= e1000_pch_spt)
            E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
                                  hsfsts.regval & 0xFFFF);
        else
            E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
                                    hsfsts.regval);
        ret_val = E1000_SUCCESS;
    } else {
        s32 i;

        /* Otherwise poll for sometime so the current
         * cycle has a chance to end before giving up.
         */
        for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
            hsfsts.regval = E1000_READ_FLASH_REG16(hw,
                                                   ICH_FLASH_HSFSTS);
            if (!hsfsts.hsf_status.flcinprog) {
                ret_val = E1000_SUCCESS;
                break;
            }
            usec_delay(1);
        }
        if (ret_val == E1000_SUCCESS) {
            /* Successful in waiting for previous cycle to timeout,
             * now set the Flash Cycle Done.
             */
            hsfsts.hsf_status.flcdone = 1;
            if (hw->mac.type >= e1000_pch_spt)
                E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
                                      hsfsts.regval & 0xFFFF);
            else
                E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
                                        hsfsts.regval);
        } else {
            DEBUGOUT("Flash controller busy, cannot get access\n");
        }
    }

    return ret_val;
}

/**
 *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
 *  @hw: pointer to the HW structure
 *  @timeout: maximum time to wait for completion
 *
 *  This function starts a flash cycle and waits for its completion.
 **/
static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
{
    union ich8_hws_flash_ctrl hsflctl;
    union ich8_hws_flash_status hsfsts;
    u32 i = 0;

    DEBUGFUNC("e1000_flash_cycle_ich8lan");

    /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
    /* On SPT+ HSFCTL lives in the upper 16 bits of the 32-bit HSFSTS
     * register, hence the shifts around the 32-bit accessors.
     */
    if (hw->mac.type >= e1000_pch_spt)
        hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
    else
        hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
    hsflctl.hsf_ctrl.flcgo = 1;

    if (hw->mac.type >= e1000_pch_spt)
        E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
                              hsflctl.regval << 16);
    else
        E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);

    /* wait till FDONE bit is set to 1 */
    do {
        hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
        if (hsfsts.hsf_status.flcdone)
            break;
        usec_delay(1);
    } while (i++ < timeout);

    /* Success only when the cycle finished without a flash error */
    if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
        return E1000_SUCCESS;

    return -E1000_ERR_NVM;
}

/**
 *  e1000_read_flash_dword_ich8lan - Read dword from flash
 *  @hw: pointer to the HW structure
 *  @offset: offset to data location
 *  @data: pointer to the location for storing the data
 *
 *  Reads the flash dword at offset into data.  Offset is converted
 *  to bytes before read.
 **/
static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
                                          u32 *data)
{
    DEBUGFUNC("e1000_read_flash_dword_ich8lan");

    if (!data)
        return -E1000_ERR_NVM;

    /* Must convert word offset into bytes. */
    offset <<= 1;

    return e1000_read_flash_data32_ich8lan(hw, offset, data);
}

/**
 *  e1000_read_flash_word_ich8lan - Read word from flash
 *  @hw: pointer to the HW structure
 *  @offset: offset to data location
 *  @data: pointer to the location for storing the data
 *
 *  Reads the flash word at offset into data.  Offset is converted
 *  to bytes before read.
 **/
static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
                                         u16 *data)
{
    DEBUGFUNC("e1000_read_flash_word_ich8lan");

    if (!data)
        return -E1000_ERR_NVM;

    /* Must convert offset into bytes. */
    offset <<= 1;

    return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
}

/**
 *  e1000_read_flash_byte_ich8lan - Read byte from flash
 *  @hw: pointer to the HW structure
 *  @offset: The offset of the byte to read.
 *  @data: Pointer to a byte to store the value read.
 *
 *  Reads a single byte from the NVM using the flash access registers.
 **/
static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
                                         u8 *data)
{
    s32 ret_val;
    u16 word = 0;

    /* In SPT, only 32 bits access is supported,
     * so this function should not be called.
     */
    if (hw->mac.type >= e1000_pch_spt)
        return -E1000_ERR_NVM;
    else
        ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);

    if (ret_val)
        return ret_val;

    *data = (u8)word;

    return E1000_SUCCESS;
}

/**
 *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
 *  @hw: pointer to the HW structure
 *  @offset: The offset (in bytes) of the byte or word to read.
 *  @size: Size of data to read, 1=byte 2=word
 *  @data: Pointer to the word to store the value read.
 *
 *  Reads a byte or word from the NVM using the flash access registers.
3854 **/ 3855 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, 3856 u8 size, u16 *data) 3857 { 3858 union ich8_hws_flash_status hsfsts; 3859 union ich8_hws_flash_ctrl hsflctl; 3860 u32 flash_linear_addr; 3861 u32 flash_data = 0; 3862 s32 ret_val = -E1000_ERR_NVM; 3863 u8 count = 0; 3864 3865 DEBUGFUNC("e1000_read_flash_data_ich8lan"); 3866 3867 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) 3868 return -E1000_ERR_NVM; 3869 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + 3870 hw->nvm.flash_base_addr); 3871 3872 do { 3873 usec_delay(1); 3874 /* Steps */ 3875 ret_val = e1000_flash_cycle_init_ich8lan(hw); 3876 if (ret_val != E1000_SUCCESS) 3877 break; 3878 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); 3879 3880 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ 3881 hsflctl.hsf_ctrl.fldbcount = size - 1; 3882 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; 3883 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); 3884 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr); 3885 3886 ret_val = e1000_flash_cycle_ich8lan(hw, 3887 ICH_FLASH_READ_COMMAND_TIMEOUT); 3888 3889 /* Check if FCERR is set to 1, if set to 1, clear it 3890 * and try the whole sequence a few more times, else 3891 * read in (shift in) the Flash Data0, the order is 3892 * least significant byte first msb to lsb 3893 */ 3894 if (ret_val == E1000_SUCCESS) { 3895 flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0); 3896 if (size == 1) 3897 *data = (u8)(flash_data & 0x000000FF); 3898 else if (size == 2) 3899 *data = (u16)(flash_data & 0x0000FFFF); 3900 break; 3901 } else { 3902 /* If we've gotten here, then things are probably 3903 * completely hosed, but if the error condition is 3904 * detected, it won't hurt to give it another try... 3905 * ICH_FLASH_CYCLE_REPEAT_COUNT times. 
3906 */ 3907 hsfsts.regval = E1000_READ_FLASH_REG16(hw, 3908 ICH_FLASH_HSFSTS); 3909 if (hsfsts.hsf_status.flcerr) { 3910 /* Repeat for some time before giving up. */ 3911 continue; 3912 } else if (!hsfsts.hsf_status.flcdone) { 3913 DEBUGOUT("Timeout error - flash cycle did not complete.\n"); 3914 break; 3915 } 3916 } 3917 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); 3918 3919 return ret_val; 3920 } 3921 3922 /** 3923 * e1000_read_flash_data32_ich8lan - Read dword from NVM 3924 * @hw: pointer to the HW structure 3925 * @offset: The offset (in bytes) of the dword to read. 3926 * @data: Pointer to the dword to store the value read. 3927 * 3928 * Reads a byte or word from the NVM using the flash access registers. 3929 **/ 3930 static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, 3931 u32 *data) 3932 { 3933 union ich8_hws_flash_status hsfsts; 3934 union ich8_hws_flash_ctrl hsflctl; 3935 u32 flash_linear_addr; 3936 s32 ret_val = -E1000_ERR_NVM; 3937 u8 count = 0; 3938 3939 DEBUGFUNC("e1000_read_flash_data_ich8lan"); 3940 3941 if (offset > ICH_FLASH_LINEAR_ADDR_MASK || 3942 hw->mac.type < e1000_pch_spt) 3943 return -E1000_ERR_NVM; 3944 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + 3945 hw->nvm.flash_base_addr); 3946 3947 do { 3948 usec_delay(1); 3949 /* Steps */ 3950 ret_val = e1000_flash_cycle_init_ich8lan(hw); 3951 if (ret_val != E1000_SUCCESS) 3952 break; 3953 /* In SPT, This register is in Lan memory space, not flash. 3954 * Therefore, only 32 bit access is supported 3955 */ 3956 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16; 3957 3958 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ 3959 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1; 3960 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; 3961 /* In SPT, This register is in Lan memory space, not flash. 
3962 * Therefore, only 32 bit access is supported 3963 */ 3964 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, 3965 (u32)hsflctl.regval << 16); 3966 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr); 3967 3968 ret_val = e1000_flash_cycle_ich8lan(hw, 3969 ICH_FLASH_READ_COMMAND_TIMEOUT); 3970 3971 /* Check if FCERR is set to 1, if set to 1, clear it 3972 * and try the whole sequence a few more times, else 3973 * read in (shift in) the Flash Data0, the order is 3974 * least significant byte first msb to lsb 3975 */ 3976 if (ret_val == E1000_SUCCESS) { 3977 *data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0); 3978 break; 3979 } else { 3980 /* If we've gotten here, then things are probably 3981 * completely hosed, but if the error condition is 3982 * detected, it won't hurt to give it another try... 3983 * ICH_FLASH_CYCLE_REPEAT_COUNT times. 3984 */ 3985 hsfsts.regval = E1000_READ_FLASH_REG16(hw, 3986 ICH_FLASH_HSFSTS); 3987 if (hsfsts.hsf_status.flcerr) { 3988 /* Repeat for some time before giving up. */ 3989 continue; 3990 } else if (!hsfsts.hsf_status.flcdone) { 3991 DEBUGOUT("Timeout error - flash cycle did not complete.\n"); 3992 break; 3993 } 3994 } 3995 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); 3996 3997 return ret_val; 3998 } 3999 4000 /** 4001 * e1000_write_nvm_ich8lan - Write word(s) to the NVM 4002 * @hw: pointer to the HW structure 4003 * @offset: The offset (in bytes) of the word(s) to write. 4004 * @words: Size of data to write in words 4005 * @data: Pointer to the word(s) to write at offset. 4006 * 4007 * Writes a byte or word to the NVM using the flash access registers. 
 **/
static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
                                   u16 *data)
{
    struct e1000_nvm_info *nvm = &hw->nvm;
    struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
    u16 i;

    DEBUGFUNC("e1000_write_nvm_ich8lan");

    if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
        (words == 0)) {
        DEBUGOUT("nvm parameter(s) out of bounds\n");
        return -E1000_ERR_NVM;
    }

    nvm->ops.acquire(hw);

    /* Writes are only staged in the shadow RAM here; they are committed
     * to the flash by the update_nvm_checksum routines.
     */
    for (i = 0; i < words; i++) {
        dev_spec->shadow_ram[offset+i].modified = TRUE;
        dev_spec->shadow_ram[offset+i].value = data[i];
    }

    nvm->ops.release(hw);

    return E1000_SUCCESS;
}

/**
 *  e1000_update_nvm_checksum_spt - Update the checksum for NVM
 *  @hw: pointer to the HW structure
 *
 *  The NVM checksum is updated by calling the generic update_nvm_checksum,
 *  which writes the checksum to the shadow ram.  The changes in the shadow
 *  ram are then committed to the EEPROM by processing each bank at a time
 *  checking for the modified bit and writing only the pending changes.
 *  After a successful commit, the shadow ram is cleared and is ready for
 *  future writes.
 **/
static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
{
    struct e1000_nvm_info *nvm = &hw->nvm;
    struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
    u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
    s32 ret_val;
    u32 dword = 0;

    DEBUGFUNC("e1000_update_nvm_checksum_spt");

    ret_val = e1000_update_nvm_checksum_generic(hw);
    if (ret_val)
        goto out;

    if (nvm->type != e1000_nvm_flash_sw)
        goto out;

    nvm->ops.acquire(hw);

    /* We're writing to the opposite bank so if we're on bank 1,
     * write to bank 0 etc.  We also need to erase the segment that
     * is going to be written
     */
    ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
    if (ret_val != E1000_SUCCESS) {
        DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
        bank = 0;
    }

    if (bank == 0) {
        new_bank_offset = nvm->flash_bank_size;
        old_bank_offset = 0;
        ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
        if (ret_val)
            goto release;
    } else {
        old_bank_offset = nvm->flash_bank_size;
        new_bank_offset = 0;
        ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
        if (ret_val)
            goto release;
    }
    /* SPT commits two words at a time via 32-bit (dword) flash writes */
    for (i = 0; i < E1000_SHADOW_RAM_WORDS; i += 2) {
        /* Determine whether to write the value stored
         * in the other NVM bank or a modified value stored
         * in the shadow RAM
         */
        ret_val = e1000_read_flash_dword_ich8lan(hw,
                                                 i + old_bank_offset,
                                                 &dword);

        if (dev_spec->shadow_ram[i].modified) {
            dword &= 0xffff0000;
            dword |= (dev_spec->shadow_ram[i].value & 0xffff);
        }
        if (dev_spec->shadow_ram[i + 1].modified) {
            dword &= 0x0000ffff;
            dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
                      << 16);
        }
        if (ret_val)
            break;

        /* If the word is 0x13, then make sure the signature bits
         * (15:14) are 11b until the commit has completed.
         * This will allow us to write 10b which indicates the
         * signature is valid.  We want to do this after the write
         * has completed so that we don't mark the segment valid
         * while the write is still in progress
         */
        if (i == E1000_ICH_NVM_SIG_WORD - 1)
            dword |= E1000_ICH_NVM_SIG_MASK << 16;

        /* Convert offset to bytes. */
        act_offset = (i + new_bank_offset) << 1;

        usec_delay(100);

        /* Write the data to the new bank.  Offset in words*/
        act_offset = i + new_bank_offset;
        ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
                                                        dword);
        if (ret_val)
            break;
    }

    /* Don't bother writing the segment valid bits if sector
     * programming failed.
     */
    if (ret_val) {
        DEBUGOUT("Flash commit failed.\n");
        goto release;
    }

    /* Finally validate the new segment by setting bit 15:14
     * to 10b in word 0x13 , this can be done without an
     * erase as well since these bits are 11 to start with
     * and we need to change bit 14 to 0b
     */
    act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;

    /*offset in words but we read dword*/
    --act_offset;
    ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);

    if (ret_val)
        goto release;

    /* Clear bit 14 of the signature word (bit 30 of the dword) */
    dword &= 0xBFFFFFFF;
    ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);

    if (ret_val)
        goto release;

    /* And invalidate the previously valid segment by setting
     * its signature word (0x13) high_byte to 0b.  This can be
     * done without an erase because flash erase sets all bits
     * to 1's.  We can write 1's to 0's without an erase
     */
    act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;

    /* offset in words but we read dword*/
    act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
    ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);

    if (ret_val)
        goto release;

    dword &= 0x00FFFFFF;
    ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);

    if (ret_val)
        goto release;

    /* Great!  Everything worked, we can now clear the cached entries. */
    for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
        dev_spec->shadow_ram[i].modified = FALSE;
        dev_spec->shadow_ram[i].value = 0xFFFF;
    }

release:
    nvm->ops.release(hw);

    /* Reload the EEPROM, or else modifications will not appear
     * until after the next adapter reset.
     */
    if (!ret_val) {
        nvm->ops.reload(hw);
        msec_delay(10);
    }

out:
    if (ret_val)
        DEBUGOUT1("NVM update error: %d\n", ret_val);

    return ret_val;
}

/**
 *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
 *  @hw: pointer to the HW structure
 *
 *  The NVM checksum is updated by calling the generic update_nvm_checksum,
 *  which writes the checksum to the shadow ram.  The changes in the shadow
 *  ram are then committed to the EEPROM by processing each bank at a time
 *  checking for the modified bit and writing only the pending changes.
 *  After a successful commit, the shadow ram is cleared and is ready for
 *  future writes.
 **/
static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
{
    struct e1000_nvm_info *nvm = &hw->nvm;
    struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
    u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
    s32 ret_val;
    u16 data = 0;

    DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");

    ret_val = e1000_update_nvm_checksum_generic(hw);
    if (ret_val)
        goto out;

    if (nvm->type != e1000_nvm_flash_sw)
        goto out;

    nvm->ops.acquire(hw);

    /* We're writing to the opposite bank so if we're on bank 1,
     * write to bank 0 etc.
We also need to erase the segment that 4237 * is going to be written 4238 */ 4239 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); 4240 if (ret_val != E1000_SUCCESS) { 4241 DEBUGOUT("Could not detect valid bank, assuming bank 0\n"); 4242 bank = 0; 4243 } 4244 4245 if (bank == 0) { 4246 new_bank_offset = nvm->flash_bank_size; 4247 old_bank_offset = 0; 4248 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); 4249 if (ret_val) 4250 goto release; 4251 } else { 4252 old_bank_offset = nvm->flash_bank_size; 4253 new_bank_offset = 0; 4254 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); 4255 if (ret_val) 4256 goto release; 4257 } 4258 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { 4259 if (dev_spec->shadow_ram[i].modified) { 4260 data = dev_spec->shadow_ram[i].value; 4261 } else { 4262 ret_val = e1000_read_flash_word_ich8lan(hw, i + 4263 old_bank_offset, 4264 &data); 4265 if (ret_val) 4266 break; 4267 } 4268 /* If the word is 0x13, then make sure the signature bits 4269 * (15:14) are 11b until the commit has completed. 4270 * This will allow us to write 10b which indicates the 4271 * signature is valid. We want to do this after the write 4272 * has completed so that we don't mark the segment valid 4273 * while the write is still in progress 4274 */ 4275 if (i == E1000_ICH_NVM_SIG_WORD) 4276 data |= E1000_ICH_NVM_SIG_MASK; 4277 4278 /* Convert offset to bytes. */ 4279 act_offset = (i + new_bank_offset) << 1; 4280 4281 usec_delay(100); 4282 4283 /* Write the bytes to the new bank. */ 4284 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, 4285 act_offset, 4286 (u8)data); 4287 if (ret_val) 4288 break; 4289 4290 usec_delay(100); 4291 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, 4292 act_offset + 1, 4293 (u8)(data >> 8)); 4294 if (ret_val) 4295 break; 4296 } 4297 4298 /* Don't bother writing the segment valid bits if sector 4299 * programming failed. 
4300 */ 4301 if (ret_val) { 4302 DEBUGOUT("Flash commit failed.\n"); 4303 goto release; 4304 } 4305 4306 /* Finally validate the new segment by setting bit 15:14 4307 * to 10b in word 0x13 , this can be done without an 4308 * erase as well since these bits are 11 to start with 4309 * and we need to change bit 14 to 0b 4310 */ 4311 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; 4312 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data); 4313 if (ret_val) 4314 goto release; 4315 4316 data &= 0xBFFF; 4317 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1, 4318 (u8)(data >> 8)); 4319 if (ret_val) 4320 goto release; 4321 4322 /* And invalidate the previously valid segment by setting 4323 * its signature word (0x13) high_byte to 0b. This can be 4324 * done without an erase because flash erase sets all bits 4325 * to 1's. We can write 1's to 0's without an erase 4326 */ 4327 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; 4328 4329 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); 4330 4331 if (ret_val) 4332 goto release; 4333 4334 /* Great! Everything worked, we can now clear the cached entries. */ 4335 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { 4336 dev_spec->shadow_ram[i].modified = FALSE; 4337 dev_spec->shadow_ram[i].value = 0xFFFF; 4338 } 4339 4340 release: 4341 nvm->ops.release(hw); 4342 4343 /* Reload the EEPROM, or else modifications will not appear 4344 * until after the next adapter reset. 4345 */ 4346 if (!ret_val) { 4347 nvm->ops.reload(hw); 4348 msec_delay(10); 4349 } 4350 4351 out: 4352 if (ret_val) 4353 DEBUGOUT1("NVM update error: %d\n", ret_val); 4354 4355 return ret_val; 4356 } 4357 4358 /** 4359 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum 4360 * @hw: pointer to the HW structure 4361 * 4362 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19. 
 * If the bit is 0, the EEPROM had been modified, but the checksum was not
 * calculated, in which case we need to calculate the checksum and set bit 6.
 **/
static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 data;
	u16 word;
	u16 valid_csum_mask;

	DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");

	/* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
	 * the checksum needs to be fixed.  This bit is an indication that
	 * the NVM was prepared by OEM software and did not calculate
	 * the checksum...a likely scenario.
	 */
	/* The word holding the valid-checksum bit differs by MAC family:
	 * LPT/SPT/CNP keep it in NVM_COMPAT, everything else in
	 * NVM_FUTURE_INIT_WORD1.
	 */
	switch (hw->mac.type) {
	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pch_cnp:
		word = NVM_COMPAT;
		valid_csum_mask = NVM_COMPAT_VALID_CSUM;
		break;
	default:
		word = NVM_FUTURE_INIT_WORD1;
		valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
		break;
	}

	ret_val = hw->nvm.ops.read(hw, word, 1, &data);
	if (ret_val)
		return ret_val;

	/* Bit clear: set it, write the word back and commit the NVM so the
	 * image is marked as carrying a valid checksum before validating.
	 */
	if (!(data & valid_csum_mask)) {
		data |= valid_csum_mask;
		ret_val = hw->nvm.ops.write(hw, word, 1, &data);
		if (ret_val)
			return ret_val;
		ret_val = hw->nvm.ops.update(hw);
		if (ret_val)
			return ret_val;
	}

	return e1000_validate_nvm_checksum_generic(hw);
}

/**
 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
 * @hw: pointer to the HW structure
 * @offset: The offset (in bytes) of the byte/word to read.
 * @size: Size of data to read, 1=byte 2=word
 * @data: The byte(s) to write to the NVM.
 *
 * Writes one/two bytes to the NVM using the flash access registers.
4418 **/ 4419 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, 4420 u8 size, u16 data) 4421 { 4422 union ich8_hws_flash_status hsfsts; 4423 union ich8_hws_flash_ctrl hsflctl; 4424 u32 flash_linear_addr; 4425 u32 flash_data = 0; 4426 s32 ret_val; 4427 u8 count = 0; 4428 4429 DEBUGFUNC("e1000_write_ich8_data"); 4430 4431 if (hw->mac.type >= e1000_pch_spt) { 4432 if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK) 4433 return -E1000_ERR_NVM; 4434 } else { 4435 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) 4436 return -E1000_ERR_NVM; 4437 } 4438 4439 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + 4440 hw->nvm.flash_base_addr); 4441 4442 do { 4443 usec_delay(1); 4444 /* Steps */ 4445 ret_val = e1000_flash_cycle_init_ich8lan(hw); 4446 if (ret_val != E1000_SUCCESS) 4447 break; 4448 /* In SPT, This register is in Lan memory space, not 4449 * flash. Therefore, only 32 bit access is supported 4450 */ 4451 if (hw->mac.type >= e1000_pch_spt) 4452 hsflctl.regval = 4453 E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16; 4454 else 4455 hsflctl.regval = 4456 E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); 4457 4458 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ 4459 hsflctl.hsf_ctrl.fldbcount = size - 1; 4460 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; 4461 /* In SPT, This register is in Lan memory space, 4462 * not flash. 
Therefore, only 32 bit access is 4463 * supported 4464 */ 4465 if (hw->mac.type >= e1000_pch_spt) 4466 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, 4467 hsflctl.regval << 16); 4468 else 4469 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, 4470 hsflctl.regval); 4471 4472 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr); 4473 4474 if (size == 1) 4475 flash_data = (u32)data & 0x00FF; 4476 else 4477 flash_data = (u32)data; 4478 4479 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data); 4480 4481 /* check if FCERR is set to 1 , if set to 1, clear it 4482 * and try the whole sequence a few more times else done 4483 */ 4484 ret_val = 4485 e1000_flash_cycle_ich8lan(hw, 4486 ICH_FLASH_WRITE_COMMAND_TIMEOUT); 4487 if (ret_val == E1000_SUCCESS) 4488 break; 4489 4490 /* If we're here, then things are most likely 4491 * completely hosed, but if the error condition 4492 * is detected, it won't hurt to give it another 4493 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. 4494 */ 4495 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); 4496 if (hsfsts.hsf_status.flcerr) 4497 /* Repeat for some time before giving up. */ 4498 continue; 4499 if (!hsfsts.hsf_status.flcdone) { 4500 DEBUGOUT("Timeout error - flash cycle did not complete.\n"); 4501 break; 4502 } 4503 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); 4504 4505 return ret_val; 4506 } 4507 4508 /** 4509 * e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM 4510 * @hw: pointer to the HW structure 4511 * @offset: The offset (in bytes) of the dwords to read. 4512 * @data: The 4 bytes to write to the NVM. 4513 * 4514 * Writes one/two/four bytes to the NVM using the flash access registers. 
4515 **/ 4516 static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, 4517 u32 data) 4518 { 4519 union ich8_hws_flash_status hsfsts; 4520 union ich8_hws_flash_ctrl hsflctl; 4521 u32 flash_linear_addr; 4522 s32 ret_val; 4523 u8 count = 0; 4524 4525 DEBUGFUNC("e1000_write_flash_data32_ich8lan"); 4526 4527 if (hw->mac.type >= e1000_pch_spt) { 4528 if (offset > ICH_FLASH_LINEAR_ADDR_MASK) 4529 return -E1000_ERR_NVM; 4530 } 4531 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + 4532 hw->nvm.flash_base_addr); 4533 do { 4534 usec_delay(1); 4535 /* Steps */ 4536 ret_val = e1000_flash_cycle_init_ich8lan(hw); 4537 if (ret_val != E1000_SUCCESS) 4538 break; 4539 4540 /* In SPT, This register is in Lan memory space, not 4541 * flash. Therefore, only 32 bit access is supported 4542 */ 4543 if (hw->mac.type >= e1000_pch_spt) 4544 hsflctl.regval = E1000_READ_FLASH_REG(hw, 4545 ICH_FLASH_HSFSTS) 4546 >> 16; 4547 else 4548 hsflctl.regval = E1000_READ_FLASH_REG16(hw, 4549 ICH_FLASH_HSFCTL); 4550 4551 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1; 4552 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; 4553 4554 /* In SPT, This register is in Lan memory space, 4555 * not flash. 
		 * Therefore, only 32 bit access is
		 * supported
		 */
		if (hw->mac.type >= e1000_pch_spt)
			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
					      hsflctl.regval << 16);
		else
			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
						hsflctl.regval);

		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);

		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);

		/* check if FCERR is set to 1 , if set to 1, clear it
		 * and try the whole sequence a few more times else done
		 */
		ret_val = e1000_flash_cycle_ich8lan(hw,
						ICH_FLASH_WRITE_COMMAND_TIMEOUT);

		if (ret_val == E1000_SUCCESS)
			break;

		/* If we're here, then things are most likely
		 * completely hosed, but if the error condition
		 * is detected, it won't hurt to give it another
		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
		 */
		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);

		if (hsfsts.hsf_status.flcerr)
			/* Repeat for some time before giving up. */
			continue;
		if (!hsfsts.hsf_status.flcdone) {
			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
			break;
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}

/**
 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
 * @hw: pointer to the HW structure
 * @offset: The index of the byte to read.
 * @data: The byte to write to the NVM.
 *
 * Writes a single byte to the NVM using the flash access registers.
 **/
static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
					  u8 data)
{
	/* Widen to u16 so the generic one/two-byte writer can take it;
	 * size = 1 keeps this a single-byte flash write.
	 */
	u16 word = (u16)data;

	DEBUGFUNC("e1000_write_flash_byte_ich8lan");

	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
}

/**
 * e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
 * @hw: pointer to the HW structure
 * @offset: The offset of the word to write.
4619 * @dword: The dword to write to the NVM. 4620 * 4621 * Writes a single dword to the NVM using the flash access registers. 4622 * Goes through a retry algorithm before giving up. 4623 **/ 4624 static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw, 4625 u32 offset, u32 dword) 4626 { 4627 s32 ret_val; 4628 u16 program_retries; 4629 4630 DEBUGFUNC("e1000_retry_write_flash_dword_ich8lan"); 4631 4632 /* Must convert word offset into bytes. */ 4633 offset <<= 1; 4634 4635 ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword); 4636 4637 if (!ret_val) 4638 return ret_val; 4639 for (program_retries = 0; program_retries < 100; program_retries++) { 4640 DEBUGOUT2("Retrying Byte %8.8X at offset %u\n", dword, offset); 4641 usec_delay(100); 4642 ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword); 4643 if (ret_val == E1000_SUCCESS) 4644 break; 4645 } 4646 if (program_retries == 100) 4647 return -E1000_ERR_NVM; 4648 4649 return E1000_SUCCESS; 4650 } 4651 4652 /** 4653 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM 4654 * @hw: pointer to the HW structure 4655 * @offset: The offset of the byte to write. 4656 * @byte: The byte to write to the NVM. 4657 * 4658 * Writes a single byte to the NVM using the flash access registers. 4659 * Goes through a retry algorithm before giving up. 
 **/
static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
						u32 offset, u8 byte)
{
	s32 ret_val;
	u16 program_retries;

	DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");

	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
	if (!ret_val)
		return ret_val;

	/* First write failed - retry up to 100 times with a 100us pause
	 * between attempts before giving up on the NVM write.
	 */
	for (program_retries = 0; program_retries < 100; program_retries++) {
		DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
		usec_delay(100);
		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
		if (ret_val == E1000_SUCCESS)
			break;
	}
	if (program_retries == 100)
		return -E1000_ERR_NVM;

	return E1000_SUCCESS;
}

/**
 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
 * @hw: pointer to the HW structure
 * @bank: 0 for first bank, 1 for second bank, etc.
 *
 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
 * bank N is 4096 * N + flash_reg_addr.
 **/
static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	/* bank size is in 16bit words - adjust to bytes */
	u32 flash_bank_size = nvm->flash_bank_size * 2;
	s32 ret_val;
	s32 count = 0;
	s32 j, iteration, sector_size;

	DEBUGFUNC("e1000_erase_flash_bank_ich8lan");

	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);

	/* Determine HW Sector size: Read BERASE bits of hw flash status
	 * register
	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
	 *     consecutive sectors.  The start index for the nth Hw sector
	 *     can be calculated as = bank * 4096 + n * 256
	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
4716 * The start index for the nth Hw sector can be calculated 4717 * as = bank * 4096 4718 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192 4719 * (ich9 only, otherwise error condition) 4720 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536 4721 */ 4722 switch (hsfsts.hsf_status.berasesz) { 4723 case 0: 4724 /* Hw sector size 256 */ 4725 sector_size = ICH_FLASH_SEG_SIZE_256; 4726 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256; 4727 break; 4728 case 1: 4729 sector_size = ICH_FLASH_SEG_SIZE_4K; 4730 iteration = 1; 4731 break; 4732 case 2: 4733 sector_size = ICH_FLASH_SEG_SIZE_8K; 4734 iteration = 1; 4735 break; 4736 case 3: 4737 sector_size = ICH_FLASH_SEG_SIZE_64K; 4738 iteration = 1; 4739 break; 4740 default: 4741 return -E1000_ERR_NVM; 4742 } 4743 4744 /* Start with the base address, then add the sector offset. */ 4745 flash_linear_addr = hw->nvm.flash_base_addr; 4746 flash_linear_addr += (bank) ? flash_bank_size : 0; 4747 4748 for (j = 0; j < iteration; j++) { 4749 do { 4750 u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT; 4751 4752 /* Steps */ 4753 ret_val = e1000_flash_cycle_init_ich8lan(hw); 4754 if (ret_val) 4755 return ret_val; 4756 4757 /* Write a value 11 (block Erase) in Flash 4758 * Cycle field in hw flash control 4759 */ 4760 if (hw->mac.type >= e1000_pch_spt) 4761 hsflctl.regval = 4762 E1000_READ_FLASH_REG(hw, 4763 ICH_FLASH_HSFSTS)>>16; 4764 else 4765 hsflctl.regval = 4766 E1000_READ_FLASH_REG16(hw, 4767 ICH_FLASH_HSFCTL); 4768 4769 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; 4770 if (hw->mac.type >= e1000_pch_spt) 4771 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, 4772 hsflctl.regval << 16); 4773 else 4774 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, 4775 hsflctl.regval); 4776 4777 /* Write the last 24 bits of an index within the 4778 * block into Flash Linear address field in Flash 4779 * Address. 
			 */
			/* NOTE(review): this `+=` executes once per do/while
			 * retry and accumulates across j iterations, so for
			 * the 256-byte-sector case (iteration > 1) the
			 * address over-advances after j == 1 - confirm
			 * against the datasheet before relying on that path.
			 */
			flash_linear_addr += (j * sector_size);
			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
					      flash_linear_addr);

			ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
			if (ret_val == E1000_SUCCESS)
				break;

			/* Check if FCERR is set to 1.  If 1,
			 * clear it and try the whole sequence
			 * a few more times else Done
			 */
			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
							       ICH_FLASH_HSFSTS);
			if (hsfsts.hsf_status.flcerr)
				/* repeat for some time before giving up */
				continue;
			else if (!hsfsts.hsf_status.flcdone)
				return ret_val;
		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
	}

	return E1000_SUCCESS;
}

/**
 * e1000_valid_led_default_ich8lan - Set the default LED settings
 * @hw: pointer to the HW structure
 * @data: Pointer to the LED settings
 *
 * Reads the LED default settings from the NVM to data.  If the NVM LED
 * settings is all 0's or F's, set the LED default to a valid LED default
 * setting.
 **/
static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	DEBUGFUNC("e1000_valid_led_default_ich8lan");

	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	/* An all-zero or all-one NVM word is treated as unprogrammed;
	 * fall back to the ICH8 default LED configuration.
	 */
	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
		*data = ID_LED_DEFAULT_ICH8LAN;

	return E1000_SUCCESS;
}

/**
 * e1000_id_led_init_pchlan - store LED configurations
 * @hw: pointer to the HW structure
 *
 * PCH does not control LEDs via the LEDCTL register, rather it uses
 * the PHY LED configuration register.
 *
 * PCH also does not have an "always on" or "always off" mode which
 * complicates the ID feature.
Instead of using the "on" mode to indicate 4842 * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()), 4843 * use "link_up" mode. The LEDs will still ID on request if there is no 4844 * link based on logic in e1000_led_[on|off]_pchlan(). 4845 **/ 4846 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw) 4847 { 4848 struct e1000_mac_info *mac = &hw->mac; 4849 s32 ret_val; 4850 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP; 4851 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT; 4852 u16 data, i, temp, shift; 4853 4854 DEBUGFUNC("e1000_id_led_init_pchlan"); 4855 4856 /* Get default ID LED modes */ 4857 ret_val = hw->nvm.ops.valid_led_default(hw, &data); 4858 if (ret_val) 4859 return ret_val; 4860 4861 mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL); 4862 mac->ledctl_mode1 = mac->ledctl_default; 4863 mac->ledctl_mode2 = mac->ledctl_default; 4864 4865 for (i = 0; i < 4; i++) { 4866 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK; 4867 shift = (i * 5); 4868 switch (temp) { 4869 case ID_LED_ON1_DEF2: 4870 case ID_LED_ON1_ON2: 4871 case ID_LED_ON1_OFF2: 4872 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift); 4873 mac->ledctl_mode1 |= (ledctl_on << shift); 4874 break; 4875 case ID_LED_OFF1_DEF2: 4876 case ID_LED_OFF1_ON2: 4877 case ID_LED_OFF1_OFF2: 4878 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift); 4879 mac->ledctl_mode1 |= (ledctl_off << shift); 4880 break; 4881 default: 4882 /* Do nothing */ 4883 break; 4884 } 4885 switch (temp) { 4886 case ID_LED_DEF1_ON2: 4887 case ID_LED_ON1_ON2: 4888 case ID_LED_OFF1_ON2: 4889 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift); 4890 mac->ledctl_mode2 |= (ledctl_on << shift); 4891 break; 4892 case ID_LED_DEF1_OFF2: 4893 case ID_LED_ON1_OFF2: 4894 case ID_LED_OFF1_OFF2: 4895 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift); 4896 mac->ledctl_mode2 |= (ledctl_off << shift); 4897 break; 4898 default: 4899 /* Do nothing */ 4900 break; 4901 } 4902 } 4903 4904 
	return E1000_SUCCESS;
}

/**
 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
 * @hw: pointer to the HW structure
 *
 * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
 * register, so the bus width is hard coded.
 **/
static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
{
	struct e1000_bus_info *bus = &hw->bus;
	s32 ret_val;

	DEBUGFUNC("e1000_get_bus_info_ich8lan");

	ret_val = e1000_get_bus_info_pcie_generic(hw);

	/* ICH devices are "PCI Express"-ish.  They have
	 * a configuration space, but do not contain
	 * PCI Express Capability registers, so bus width
	 * must be hardcoded.
	 */
	if (bus->width == e1000_bus_width_unknown)
		bus->width = e1000_bus_width_pcie_x1;

	return ret_val;
}

/**
 * e1000_reset_hw_ich8lan - Reset the hardware
 * @hw: pointer to the HW structure
 *
 * Does a full reset of the hardware which includes a reset of the PHY and
 * MAC.
 **/
static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u16 kum_cfg;
	u32 ctrl, reg;
	s32 ret_val;

	DEBUGFUNC("e1000_reset_hw_ich8lan");

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = e1000_disable_pcie_master_generic(hw);
	if (ret_val)
		DEBUGOUT("PCI-E Master disable polling has failed.\n");

	DEBUGOUT("Masking off all interrupts\n");
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);

	/* Disable the Transmit and Receive units.  Then delay to allow
	 * any pending transactions to complete before we hit the MAC
	 * with the global reset.
4963 */ 4964 E1000_WRITE_REG(hw, E1000_RCTL, 0); 4965 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); 4966 E1000_WRITE_FLUSH(hw); 4967 4968 msec_delay(10); 4969 4970 /* Workaround for ICH8 bit corruption issue in FIFO memory */ 4971 if (hw->mac.type == e1000_ich8lan) { 4972 /* Set Tx and Rx buffer allocation to 8k apiece. */ 4973 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K); 4974 /* Set Packet Buffer Size to 16k. */ 4975 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K); 4976 } 4977 4978 if (hw->mac.type == e1000_pchlan) { 4979 /* Save the NVM K1 bit setting*/ 4980 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg); 4981 if (ret_val) 4982 return ret_val; 4983 4984 if (kum_cfg & E1000_NVM_K1_ENABLE) 4985 dev_spec->nvm_k1_enabled = TRUE; 4986 else 4987 dev_spec->nvm_k1_enabled = FALSE; 4988 } 4989 4990 ctrl = E1000_READ_REG(hw, E1000_CTRL); 4991 4992 if (!hw->phy.ops.check_reset_block(hw)) { 4993 /* Full-chip reset requires MAC and PHY reset at the same 4994 * time to make sure the interface between MAC and the 4995 * external PHY is reset. 
4996 */ 4997 ctrl |= E1000_CTRL_PHY_RST; 4998 4999 /* Gate automatic PHY configuration by hardware on 5000 * non-managed 82579 5001 */ 5002 if ((hw->mac.type == e1000_pch2lan) && 5003 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID)) 5004 e1000_gate_hw_phy_config_ich8lan(hw, TRUE); 5005 } 5006 ret_val = e1000_acquire_swflag_ich8lan(hw); 5007 DEBUGOUT("Issuing a global reset to ich8lan\n"); 5008 E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST)); 5009 /* cannot issue a flush here because it hangs the hardware */ 5010 msec_delay(20); 5011 5012 /* Set Phy Config Counter to 50msec */ 5013 if (hw->mac.type == e1000_pch2lan) { 5014 reg = E1000_READ_REG(hw, E1000_FEXTNVM3); 5015 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; 5016 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; 5017 E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg); 5018 } 5019 5020 if (ctrl & E1000_CTRL_PHY_RST) { 5021 ret_val = hw->phy.ops.get_cfg_done(hw); 5022 if (ret_val) 5023 return ret_val; 5024 5025 ret_val = e1000_post_phy_reset_ich8lan(hw); 5026 if (ret_val) 5027 return ret_val; 5028 } 5029 5030 /* For PCH, this write will make sure that any noise 5031 * will be detected as a CRC error and be dropped rather than show up 5032 * as a bad packet to the DMA engine. 
5033 */ 5034 if (hw->mac.type == e1000_pchlan) 5035 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565); 5036 5037 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); 5038 E1000_READ_REG(hw, E1000_ICR); 5039 5040 reg = E1000_READ_REG(hw, E1000_KABGTXD); 5041 reg |= E1000_KABGTXD_BGSQLBIAS; 5042 E1000_WRITE_REG(hw, E1000_KABGTXD, reg); 5043 5044 return E1000_SUCCESS; 5045 } 5046 5047 /** 5048 * e1000_init_hw_ich8lan - Initialize the hardware 5049 * @hw: pointer to the HW structure 5050 * 5051 * Prepares the hardware for transmit and receive by doing the following: 5052 * - initialize hardware bits 5053 * - initialize LED identification 5054 * - setup receive address registers 5055 * - setup flow control 5056 * - setup transmit descriptors 5057 * - clear statistics 5058 **/ 5059 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) 5060 { 5061 struct e1000_mac_info *mac = &hw->mac; 5062 u32 ctrl_ext, txdctl, snoop; 5063 s32 ret_val; 5064 u16 i; 5065 5066 DEBUGFUNC("e1000_init_hw_ich8lan"); 5067 5068 e1000_initialize_hw_bits_ich8lan(hw); 5069 5070 /* Initialize identification LED */ 5071 ret_val = mac->ops.id_led_init(hw); 5072 /* An error is not fatal and we should not stop init due to this */ 5073 if (ret_val) 5074 DEBUGOUT("Error initializing identification LED\n"); 5075 5076 /* Setup the receive address. */ 5077 e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); 5078 5079 /* Zero out the Multicast HASH table */ 5080 DEBUGOUT("Zeroing the MTA\n"); 5081 for (i = 0; i < mac->mta_reg_count; i++) 5082 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); 5083 5084 /* The 82578 Rx buffer will stall if wakeup is enabled in host and 5085 * the ME. Disable wakeup by clearing the host wakeup bit. 5086 * Reset the phy after disabling host wakeup to reset the Rx buffer. 
5087 */ 5088 if (hw->phy.type == e1000_phy_82578) { 5089 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i); 5090 i &= ~BM_WUC_HOST_WU_BIT; 5091 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i); 5092 ret_val = e1000_phy_hw_reset_ich8lan(hw); 5093 if (ret_val) 5094 return ret_val; 5095 } 5096 5097 /* Setup link and flow control */ 5098 ret_val = mac->ops.setup_link(hw); 5099 5100 /* Set the transmit descriptor write-back policy for both queues */ 5101 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0)); 5102 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | 5103 E1000_TXDCTL_FULL_TX_DESC_WB); 5104 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | 5105 E1000_TXDCTL_MAX_TX_DESC_PREFETCH); 5106 E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl); 5107 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1)); 5108 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | 5109 E1000_TXDCTL_FULL_TX_DESC_WB); 5110 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | 5111 E1000_TXDCTL_MAX_TX_DESC_PREFETCH); 5112 E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl); 5113 5114 /* ICH8 has opposite polarity of no_snoop bits. 5115 * By default, we should use snoop behavior. 5116 */ 5117 if (mac->type == e1000_ich8lan) 5118 snoop = PCIE_ICH8_SNOOP_ALL; 5119 else 5120 snoop = (u32) ~(PCIE_NO_SNOOP_ALL); 5121 e1000_set_pcie_no_snoop_generic(hw, snoop); 5122 5123 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 5124 ctrl_ext |= E1000_CTRL_EXT_RO_DIS; 5125 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); 5126 5127 /* Clear all of the statistics registers (clear on read). It is 5128 * important that we do this after we have tried to establish link 5129 * because the symbol error count will increment wildly if there 5130 * is no link. 
 */
	e1000_clear_hw_cntrs_ich8lan(hw);

	return ret_val;
}

/**
 *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
 *  @hw: pointer to the HW structure
 *
 *  Sets/Clears required hardware bits necessary for correctly setting up the
 *  hardware for transmit and receive.
 **/
static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
{
	u32 reg;

	DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");

	/* Extended Device Control */
	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	reg |= (1 << 22);	/* bit 22 not named in this file -- required h/w init */
	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
	if (hw->mac.type >= e1000_pchlan)
		reg |= E1000_CTRL_EXT_PHYPDEN;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);

	/* Transmit Descriptor Control 0 */
	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
	reg |= (1 << 22);
	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);

	/* Transmit Descriptor Control 1 */
	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
	reg |= (1 << 22);
	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);

	/* Transmit Arbitration Control 0 */
	reg = E1000_READ_REG(hw, E1000_TARC(0));
	if (hw->mac.type == e1000_ich8lan)
		reg |= (1 << 28) | (1 << 29);
	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
	E1000_WRITE_REG(hw, E1000_TARC(0), reg);

	/* Transmit Arbitration Control 1 */
	reg = E1000_READ_REG(hw, E1000_TARC(1));
	/* Bit 28 is cleared when multiple Tx queue requests (MULR) are on */
	if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
		reg &= ~(1 << 28);
	else
		reg |= (1 << 28);
	reg |= (1 << 24) | (1 << 26) | (1 << 30);
	E1000_WRITE_REG(hw, E1000_TARC(1), reg);

	/* Device Status */
	if (hw->mac.type == e1000_ich8lan) {
		reg = E1000_READ_REG(hw, E1000_STATUS);
		/* NOTE(review): (1 << 31) on a signed int is the upstream
		 * idiom here; kept as-is for byte-compatibility.
		 */
		reg &= ~(1 << 31);
		E1000_WRITE_REG(hw, E1000_STATUS, reg);
	}

	/* work-around descriptor data corruption issue during nfs v2 udp
	 * traffic, just disable the nfs filtering capability
	 */
	reg = E1000_READ_REG(hw, E1000_RFCTL);
	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);

	/* Disable IPv6 extension header parsing because some malformed
	 * IPv6 headers can hang the Rx.
	 */
	if (hw->mac.type == e1000_ich8lan)
		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
	E1000_WRITE_REG(hw, E1000_RFCTL, reg);

	/* Enable ECC on Lynxpoint */
	if (hw->mac.type >= e1000_pch_lpt) {
		reg = E1000_READ_REG(hw, E1000_PBECCSTS);
		reg |= E1000_PBECCSTS_ECC_ENABLE;
		E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);

		reg = E1000_READ_REG(hw, E1000_CTRL);
		reg |= E1000_CTRL_MEHE;
		E1000_WRITE_REG(hw, E1000_CTRL, reg);
	}

	return;
}

/**
 *  e1000_setup_link_ich8lan - Setup flow control and link settings
 *  @hw: pointer to the HW structure
 *
 *  Determines which flow control settings to use, then configures flow
 *  control.  Calls the appropriate media-specific link configuration
 *  function.  Assuming the adapter has a valid link partner, a valid link
 *  should be established.  Assumes the hardware has previously been reset
 *  and the transmitter and receiver are not enabled.
 **/
static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("e1000_setup_link_ich8lan");

	/* Nothing to do while PHY resets are blocked */
	if (hw->phy.ops.check_reset_block(hw))
		return E1000_SUCCESS;

	/* ICH parts do not have a word in the NVM to determine
	 * the default flow control setting, so we explicitly
	 * set it to full.
	 */
	if (hw->fc.requested_mode == e1000_fc_default)
		hw->fc.requested_mode = e1000_fc_full;

	/* Save off the requested flow control mode for use later.  Depending
	 * on the link partner's capabilities, we may or may not use this mode.
	 */
	hw->fc.current_mode = hw->fc.requested_mode;

	DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
		  hw->fc.current_mode);

	/* Continue to configure the copper link. */
	ret_val = hw->mac.ops.setup_physical_interface(hw);
	if (ret_val)
		return ret_val;

	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
	if ((hw->phy.type == e1000_phy_82578) ||
	    (hw->phy.type == e1000_phy_82579) ||
	    (hw->phy.type == e1000_phy_i217) ||
	    (hw->phy.type == e1000_phy_82577)) {
		E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);

		/* Mirror the pause time into the PHY's port control page */
		ret_val = hw->phy.ops.write_reg(hw,
					     PHY_REG(BM_PORT_CTRL_PAGE, 27),
					     hw->fc.pause_time);
		if (ret_val)
			return ret_val;
	}

	return e1000_set_fc_watermarks_generic(hw);
}

/**
 *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
 *  @hw: pointer to the HW structure
 *
 *  Configures the kumeran interface to the PHY to wait the appropriate time
 *  when polling the PHY, then call the generic setup_copper_link to finish
 *  configuring the copper link.
 **/
static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;
	u16 reg_data;

	DEBUGFUNC("e1000_setup_copper_link_ich8lan");

	/* Set link up (SLU) and let speed/duplex come from negotiation */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);

	/* Set the mac to wait the maximum time between each iteration
	 * and increase the max iterations when polling the phy;
	 * this fixes erroneous timeouts at 10Mbps.
	 */
	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
					       0xFFFF);
	if (ret_val)
		return ret_val;
	ret_val = e1000_read_kmrn_reg_generic(hw,
					      E1000_KMRNCTRLSTA_INBAND_PARAM,
					      &reg_data);
	if (ret_val)
		return ret_val;
	reg_data |= 0x3F;
	ret_val = e1000_write_kmrn_reg_generic(hw,
					       E1000_KMRNCTRLSTA_INBAND_PARAM,
					       reg_data);
	if (ret_val)
		return ret_val;

	/* PHY-family-specific copper link setup */
	switch (hw->phy.type) {
	case e1000_phy_igp_3:
		ret_val = e1000_copper_link_setup_igp(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_bm:
	case e1000_phy_82578:
		ret_val = e1000_copper_link_setup_m88(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_82577:
	case e1000_phy_82579:
		ret_val = e1000_copper_link_setup_82577(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_ife:
		/* IFE PHY: program MDI/MDI-X behavior from hw->phy.mdix */
		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
					       &reg_data);
		if (ret_val)
			return ret_val;

		reg_data &= ~IFE_PMC_AUTO_MDIX;

		switch (hw->phy.mdix) {
		case 1:
			/* force MDI */
			reg_data &= ~IFE_PMC_FORCE_MDIX;
			break;
		case 2:
			/* force MDI-X */
			reg_data |= IFE_PMC_FORCE_MDIX;
			break;
		case 0:
		default:
			/* auto-crossover */
			reg_data |= IFE_PMC_AUTO_MDIX;
			break;
		}
		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
						reg_data);
		if (ret_val)
			return ret_val;
		break;
	default:
		break;
	}

	return e1000_setup_copper_link_generic(hw);
}

/**
 *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
 *  @hw: pointer to the HW structure
 *
 *  Calls the PHY specific link setup function and then calls the
 *  generic setup_copper_link to finish configuring the link for
 *  Lynxpoint PCH devices
 **/
static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	DEBUGFUNC("e1000_setup_copper_link_pch_lpt");

	/* Set link up (SLU) and let speed/duplex come from negotiation */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);

	ret_val = e1000_copper_link_setup_82577(hw);
	if (ret_val)
		return ret_val;

	return e1000_setup_copper_link_generic(hw);
}

/**
 *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
 *  @hw: pointer to the HW structure
 *  @speed: pointer to store current link speed
 *  @duplex: pointer to store the current link duplex
 *
 *  Calls the generic get_speed_and_duplex to retrieve the current link
 *  information and then calls the Kumeran lock loss workaround for links at
 *  gigabit speeds.
 **/
static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
					  u16 *duplex)
{
	s32 ret_val;

	DEBUGFUNC("e1000_get_link_up_info_ich8lan");

	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
	if (ret_val)
		return ret_val;

	/* Only ICH8 + IGP3 at gigabit needs the PCS lock loss workaround */
	if ((hw->mac.type == e1000_ich8lan) &&
	    (hw->phy.type == e1000_phy_igp_3) &&
	    (*speed == SPEED_1000)) {
		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
	}

	return ret_val;
}

/**
 *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
 *  @hw: pointer to the HW structure
 *
 *  Work-around for 82566 Kumeran PCS lock loss:
 *  On link status change (i.e. PCI reset, speed change) and link is up and
 *  speed is gigabit-
 *    0) if workaround is optionally disabled do nothing
 *    1) wait 1ms for Kumeran link to come up
 *    2) check Kumeran Diagnostic register PCS lock loss bit
 *    3) if not set the link is locked (all is good), otherwise...
 *    4) reset the PHY
 *    5) repeat up to 10 times
 *  Note: this is only called for IGP3 copper when speed is 1gb.
 **/
static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 phy_ctrl;
	s32 ret_val;
	u16 i, data;
	bool link;

	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");

	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
		return E1000_SUCCESS;

	/* Make sure link is up before proceeding.  If not just return.
	 * Attempting this while link is negotiating fouled up link
	 * stability
	 */
	/* NOTE(review): the return value of the link check is ignored here;
	 * only the resulting link state is consulted -- confirm intentional.
	 */
	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
	if (!link)
		return E1000_SUCCESS;

	for (i = 0; i < 10; i++) {
		/* read once to clear */
		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
		if (ret_val)
			return ret_val;
		/* and again to get new status */
		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
		if (ret_val)
			return ret_val;

		/* check for PCS lock */
		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
			return E1000_SUCCESS;

		/* Issue PHY reset */
		hw->phy.ops.reset(hw);
		msec_delay_irq(5);
	}
	/* Disable GigE link negotiation */
	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

	/* Call gig speed drop workaround on Gig disable before accessing
	 * any PHY registers
	 */
	e1000_gig_downshift_workaround_ich8lan(hw);

	/* unable to acquire PCS lock */
	return -E1000_ERR_PHY;
}

/**
 *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
 *  @hw: pointer to the HW structure
 *  @state: boolean value used to set the current Kumeran workaround state
 *
 *  If ICH8, set the current Kumeran workaround state (enabled - TRUE
 *  /disabled - FALSE).
 **/
void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
						 bool state)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;

	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");

	if (hw->mac.type != e1000_ich8lan) {
		DEBUGOUT("Workaround applies to ICH8 only.\n");
		return;
	}

	dev_spec->kmrn_lock_loss_workaround_enabled = state;

	return;
}

/**
 *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
 *  @hw: pointer to the HW structure
 *
 *  Workaround for 82566 power-down on D3 entry:
 *    1) disable gigabit link
 *    2) write VR power-down enable
 *    3) read it back
 *  Continue if successful, else issue LCD reset and repeat
 **/
void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
{
	u32 reg;
	u16 data;
	u8 retry = 0;

	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");

	if (hw->phy.type != e1000_phy_igp_3)
		return;

	/* Try the workaround twice (if needed) */
	do {
		/* Disable link */
		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);

		/* Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000_gig_downshift_workaround_ich8lan(hw);

		/* Write VR power-down enable */
		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);

		/* Read it back and test */
		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
			break;

		/* Issue PHY reset and repeat at most one more time */
		reg = E1000_READ_REG(hw, E1000_CTRL);
		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
		retry++;
	} while (retry);
}

/**
 *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
 *  @hw: pointer to the HW structure
 *
 *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
 *  LPLU, Gig disable, MDIC PHY reset):
 *    1) Set Kumeran Near-end loopback
 *    2) Clear Kumeran Near-end loopback
 *  Should only be called for ICH8[m] devices with any 1G Phy.
 **/
void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 reg_data;

	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");

	if ((hw->mac.type != e1000_ich8lan) ||
	    (hw->phy.type == e1000_phy_ife))
		return;

	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
					      &reg_data);
	if (ret_val)
		return;
	/* Pulse Kumeran near-end loopback: set the bit... */
	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
	ret_val = e1000_write_kmrn_reg_generic(hw,
					       E1000_KMRNCTRLSTA_DIAG_OFFSET,
					       reg_data);
	if (ret_val)
		return;
	/* ...then clear it again (best effort; final write not checked) */
	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
	e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
				     reg_data);
}

/**
 *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
 *  @hw: pointer to the HW structure
 *
 *  During S0 to Sx transition, it is possible the link remains at gig
 *  instead of negotiating to a lower speed.  Before going to Sx, set
 *  'Gig Disable' to force link speed negotiation to a lower speed based on
 *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
 *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
 *  needs to be written.
 *  Parts that support (and are linked to a partner which support) EEE in
 *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
 *  than 10Mbps w/o EEE.
 **/
void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 phy_ctrl;
	s32 ret_val;

	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");

	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;

	if (hw->phy.type == e1000_phy_i217) {
		u16 phy_reg, device_id = hw->device_id;

		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
		    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
		    (device_id == E1000_DEV_ID_PCH_I218_V3) ||
		    (hw->mac.type >= e1000_pch_spt)) {
			u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);

			/* Stop requesting the PLL clock before Sx entry */
			E1000_WRITE_REG(hw, E1000_FEXTNVM6,
					fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
		}

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			goto out;

		if (!dev_spec->eee_disable) {
			u16 eee_advert;

			ret_val =
			    e1000_read_emi_reg_locked(hw,
						      I217_EEE_ADVERTISEMENT,
						      &eee_advert);
			if (ret_val)
				goto release;

			/* Disable LPLU if both link partners support 100BaseT
			 * EEE and 100Full is advertised on both ends of the
			 * link, and enable Auto Enable LPI since there will
			 * be no driver to enable LPI while in Sx.
			 */
			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
			    (dev_spec->eee_lp_ability &
			     I82579_EEE_100_SUPPORTED) &&
			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
					      E1000_PHY_CTRL_NOND0A_LPLU);

				/* Set Auto Enable LPI after link up */
				hw->phy.ops.read_reg_locked(hw,
							    I217_LPI_GPIO_CTRL,
							    &phy_reg);
				phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
				hw->phy.ops.write_reg_locked(hw,
							     I217_LPI_GPIO_CTRL,
							     phy_reg);
			}
		}

		/* For i217 Intel Rapid Start Technology support,
		 * when the system is going into Sx and no manageability engine
		 * is present, the driver must configure proxy to reset only on
		 * power good.  LPI (Low Power Idle) state must also reset only
		 * on power good, as well as the MTA (Multicast table array).
		 * The SMBus release must also be disabled on LCD reset.
		 */
		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		      E1000_ICH_FWSM_FW_VALID)) {
			/* Enable proxy to reset only on power good. */
			hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
						    &phy_reg);
			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
						     phy_reg);

			/* Set bit enable LPI (EEE) to reset only on
			 * power good.
			 */
			hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
			hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);

			/* Disable the SMB release on LCD reset. */
			hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
		}

		/* Enable MTA to reset for Intel Rapid Start Technology
		 * Support
		 */
		hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);

release:
		hw->phy.ops.release(hw);
	}
out:
	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

	if (hw->mac.type == e1000_ich8lan)
		e1000_gig_downshift_workaround_ich8lan(hw);

	if (hw->mac.type >= e1000_pchlan) {
		e1000_oem_bits_config_ich8lan(hw, FALSE);

		/* Reset PHY to activate OEM bits on 82577/8 */
		if (hw->mac.type == e1000_pchlan)
			e1000_phy_hw_reset_generic(hw);

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return;
		e1000_write_smbus_addr(hw);
		hw->phy.ops.release(hw);
	}

	return;
}

/**
 *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
 *  @hw: pointer to the HW structure
 *
 *  During Sx to S0 transitions on non-managed devices or managed devices
 *  on which PHY resets are not blocked, if the PHY registers cannot be
 *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
 *  the PHY.
 *  On i217, setup Intel Rapid Start Technology.
 **/
u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("e1000_resume_workarounds_pchlan");
	/* NOTE(review): return type is u32 but negative s32 error codes are
	 * returned below; callers presumably treat non-zero as failure --
	 * confirm against e1000_api users.
	 */
	if (hw->mac.type < e1000_pch2lan)
		return E1000_SUCCESS;

	ret_val = e1000_init_phy_workarounds_pchlan(hw);
	if (ret_val) {
		DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
		return ret_val;
	}

	/* For i217 Intel Rapid Start Technology support when the system
	 * is transitioning from Sx and no manageability engine is present
	 * configure SMBus to restore on reset, disable proxy, and enable
	 * the reset on MTA (Multicast table array).
	 */
	if (hw->phy.type == e1000_phy_i217) {
		u16 phy_reg;

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val) {
			DEBUGOUT("Failed to setup iRST\n");
			return ret_val;
		}

		/* Clear Auto Enable LPI after link up */
		hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
		hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);

		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		      E1000_ICH_FWSM_FW_VALID)) {
			/* Restore clear on SMB if no manageability engine
			 * is present
			 */
			ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
							      &phy_reg);
			if (ret_val)
				goto release;
			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);

			/* Disable Proxy */
			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
		}
		/* Enable reset on MTA */
		ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
						      &phy_reg);
		if (ret_val)
			goto release;
		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
release:
		if (ret_val)
			DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
		hw->phy.ops.release(hw);
		return ret_val;
	}
	return E1000_SUCCESS;
}

/**
 *  e1000_cleanup_led_ich8lan - Restore the default LED operation
 *  @hw: pointer to the HW structure
 *
 *  Return the LED back to the default configuration.
 **/
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_cleanup_led_ich8lan");

	/* IFE PHYs drive LEDs via a PHY register; others use MAC LEDCTL */
	if (hw->phy.type == e1000_phy_ife)
		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
					     0);

	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
	return E1000_SUCCESS;
}

/**
 *  e1000_led_on_ich8lan - Turn LEDs on
 *  @hw: pointer to the HW structure
 *
 *  Turn on the LEDs.
 **/
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_led_on_ich8lan");

	if (hw->phy.type == e1000_phy_ife)
		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));

	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
	return E1000_SUCCESS;
}

/**
 *  e1000_led_off_ich8lan - Turn LEDs off
 *  @hw: pointer to the HW structure
 *
 *  Turn off the LEDs.
 **/
static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_led_off_ich8lan");

	if (hw->phy.type == e1000_phy_ife)
		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
			       (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));

	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
	return E1000_SUCCESS;
}

/**
 *  e1000_setup_led_pchlan - Configures SW controllable LED
 *  @hw: pointer to the HW structure
 *
 *  This prepares the SW controllable LED for use.
 **/
static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_setup_led_pchlan");

	/* PCH LEDs are controlled through the HV_LED_CONFIG PHY register */
	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
				     (u16)hw->mac.ledctl_mode1);
}

/**
 *  e1000_cleanup_led_pchlan - Restore the default LED operation
 *  @hw: pointer to the HW structure
 *
 *  Return the LED back to the default configuration.
 **/
static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_cleanup_led_pchlan");

	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
				     (u16)hw->mac.ledctl_default);
}

/**
 *  e1000_led_on_pchlan - Turn LEDs on
 *  @hw: pointer to the HW structure
 *
 *  Turn on the LEDs.
 **/
static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
{
	u16 data = (u16)hw->mac.ledctl_mode2;
	u32 i, led;

	DEBUGFUNC("e1000_led_on_pchlan");

	/* If no link, then turn LED on by setting the invert bit
	 * for each LED that's mode is "link_up" in ledctl_mode2.
	 */
	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		/* 3 LEDs, 5 configuration bits each */
		for (i = 0; i < 3; i++) {
			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
			if ((led & E1000_PHY_LED0_MODE_MASK) !=
			    E1000_LEDCTL_MODE_LINK_UP)
				continue;
			if (led & E1000_PHY_LED0_IVRT)
				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
			else
				data |= (E1000_PHY_LED0_IVRT << (i * 5));
		}
	}

	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
}

/**
 *  e1000_led_off_pchlan - Turn LEDs off
 *  @hw: pointer to the HW structure
 *
 *  Turn off the LEDs.
 **/
static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
{
	u16 data = (u16)hw->mac.ledctl_mode1;
	u32 i, led;

	DEBUGFUNC("e1000_led_off_pchlan");

	/* If no link, then turn LED off by clearing the invert bit
	 * for each LED that's mode is "link_up" in ledctl_mode1.
	 */
	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		/* 3 LEDs, 5 configuration bits each */
		for (i = 0; i < 3; i++) {
			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
			if ((led & E1000_PHY_LED0_MODE_MASK) !=
			    E1000_LEDCTL_MODE_LINK_UP)
				continue;
			if (led & E1000_PHY_LED0_IVRT)
				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
			else
				data |= (E1000_PHY_LED0_IVRT << (i * 5));
		}
	}

	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
}

/**
 *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
 *  @hw: pointer to the HW structure
 *
 *  Read appropriate register for the config done bit for completion status
 *  and configure the PHY through s/w for EEPROM-less parts.
 *
 *  NOTE: some silicon which is EEPROM-less will fail trying to read the
 *  config done bit, so only an error is logged and continues.  If we were
 *  to return with error, EEPROM-less silicon would not be able to be reset
 *  or change link.
 **/
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u32 bank = 0;
	u32 status;

	DEBUGFUNC("e1000_get_cfg_done_ich8lan");

	e1000_get_cfg_done_generic(hw);

	/* Wait for indication from h/w that it has completed basic config */
	if (hw->mac.type >= e1000_ich10lan) {
		e1000_lan_init_done_ich8lan(hw);
	} else {
		ret_val = e1000_get_auto_rd_done_generic(hw);
		if (ret_val) {
			/* When auto config read does not complete, do not
			 * return with an error.  This can happen in situations
			 * where there is no eeprom and prevents getting link.
			 */
			DEBUGOUT("Auto Read Done did not complete\n");
			ret_val = E1000_SUCCESS;
		}
	}

	/* Clear PHY Reset Asserted bit */
	status = E1000_READ_REG(hw, E1000_STATUS);
	if (status & E1000_STATUS_PHYRA)
		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
	else
		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");

	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
	if (hw->mac.type <= e1000_ich9lan) {
		if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
		    (hw->phy.type == e1000_phy_igp_3)) {
			e1000_phy_init_script_igp3(hw);
		}
	} else {
		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
			/* Maybe we should do a basic PHY config */
			DEBUGOUT("EEPROM not present\n");
			ret_val = -E1000_ERR_CONFIG;
		}
	}

	return ret_val;
}

/**
 *  e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
 *  @hw: pointer to the HW structure
 *
 *  In the case of a PHY power down to save power, or to turn off link during a
 *  driver unload, or wake on lan is not enabled, remove the link.
 **/
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
{
	/* If the management interface is not enabled, then power down */
	if (!(hw->mac.ops.check_mng_mode(hw) ||
	      hw->phy.ops.check_reset_block(hw)))
		e1000_power_down_phy_copper(hw);

	return;
}

/**
 *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
 *  @hw: pointer to the HW structure
 *
 *  Clears hardware counters specific to the silicon family and calls
 *  clear_hw_cntrs_generic to clear all general purpose counters.
 **/
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
{
	u16 phy_data;
	s32 ret_val;

	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");

	e1000_clear_hw_cntrs_base_generic(hw);

	/* These statistics registers are clear-on-read; reading discards
	 * the accumulated values.
	 */
	E1000_READ_REG(hw, E1000_ALGNERRC);
	E1000_READ_REG(hw, E1000_RXERRC);
	E1000_READ_REG(hw, E1000_TNCRS);
	E1000_READ_REG(hw, E1000_CEXTERR);
	E1000_READ_REG(hw, E1000_TSCTC);
	E1000_READ_REG(hw, E1000_TSCTFC);

	E1000_READ_REG(hw, E1000_MGTPRC);
	E1000_READ_REG(hw, E1000_MGTPDC);
	E1000_READ_REG(hw, E1000_MGTPTC);

	E1000_READ_REG(hw, E1000_IAC);
	E1000_READ_REG(hw, E1000_ICRXOC);

	/* Clear PHY statistics registers */
	if ((hw->phy.type == e1000_phy_82578) ||
	    (hw->phy.type == e1000_phy_82579) ||
	    (hw->phy.type == e1000_phy_i217) ||
	    (hw->phy.type == e1000_phy_82577)) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return;
		ret_val = hw->phy.ops.set_page(hw,
					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
		if (ret_val)
			goto release;
		/* read upper/lower halves of each counter to clear them;
		 * the values themselves are discarded
		 */
		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
release:
		hw->phy.ops.release(hw);
	}
}
6103