/*	$OpenBSD: ixgbe_82598.c,v 1.11 2013/08/05 19:58:06 mikeb Exp $	*/

/******************************************************************************

  Copyright (c) 2001-2013, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/* FreeBSD: src/sys/dev/ixgbe/ixgbe_82598.c 251964 Jun 18 21:28:19 2013 UTC */

#include <dev/pci/ixgbe.h>
#include <dev/pci/ixgbe_type.h>

uint32_t ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
int32_t ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
					  ixgbe_link_speed *speed,
					  bool *autoneg);
enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
int32_t ixgbe_fc_enable_82598(struct ixgbe_hw *hw);
int32_t ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
				   bool autoneg_wait_to_complete);
int32_t ixgbe_validate_link_ready(struct ixgbe_hw *hw);
int32_t ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
				   ixgbe_link_speed *speed, bool *link_up,
				   bool link_up_wait_to_complete);
int32_t ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
				   ixgbe_link_speed speed,
				   bool autoneg_wait_to_complete);
int32_t ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed,
				      bool autoneg_wait_to_complete);
int32_t ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
int32_t ixgbe_start_hw_82598(struct ixgbe_hw *hw);
void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
int32_t ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq);
int32_t ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq);
int32_t ixgbe_set_vfta_82598(struct ixgbe_hw *hw, uint32_t vlan,
			     uint32_t vind, bool vlan_on);
int32_t ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
int32_t ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, uint32_t reg, uint8_t *val);
int32_t ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, uint32_t reg, uint8_t val);
int32_t ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, uint8_t dev_addr,
				 uint8_t byte_offset, uint8_t *eeprom_data);
int32_t ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, uint8_t byte_offset,
				    uint8_t *eeprom_data);
uint32_t ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
int32_t ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);

/**
 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
 * @hw: pointer to the HW structure
 *
 * The defaults for 82598 should be in the range of 50us to 50ms,
 * however the hardware default for these parts is 500us to 1ms which is less
 * than the 10ms recommended by the pci-e spec. To address this we need to
 * increase the value to either 10ms to 250ms for capability version 1 config,
 * or 16ms to 55ms for version 2.
 **/
void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
{
	uint32_t gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
	uint16_t pcie_devctl2;

	/* only take action if timeout value is defaulted to 0 */
	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
		goto out;

	/*
	 * if capabilities version is type 1 we can write the
	 * timeout of 10ms to 250ms through the GCR register
	 */
	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/*
	 * for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
	IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
out:
	/* disable completion timeout resend */
	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
}

/**
 * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
 * @hw: pointer to hardware structure
 *
 * Initialize the function pointers and assign the MAC type for 82598.
 * Does not touch the hardware.
 **/
int32_t ixgbe_init_ops_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	int32_t ret_val;

	DEBUGFUNC("ixgbe_init_ops_82598");

	ret_val = ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.init = &ixgbe_init_phy_ops_82598;

	/* MAC */
	mac->ops.start_hw = &ixgbe_start_hw_82598;
	mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
	mac->ops.reset_hw = &ixgbe_reset_hw_82598;
	mac->ops.get_media_type = &ixgbe_get_media_type_82598;
	mac->ops.get_supported_physical_layer =
				&ixgbe_get_supported_physical_layer_82598;
	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
	mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
	mac->ops.set_vfta = &ixgbe_set_vfta_82598;
	mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;

	/* Flow Control */
	mac->ops.fc_enable = &ixgbe_fc_enable_82598;

	mac->mcft_size = 128;
	mac->vft_size = 128;
	mac->num_rar_entries = 16;
	mac->rx_pb_size = 512;
	mac->max_tx_queues = 32;
	mac->max_rx_queues = 64;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);

	/* SFP+ Module */
	phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;

	/* Link */
	mac->ops.check_link = &ixgbe_check_mac_link_82598;
	mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
	mac->ops.flap_tx_laser = NULL;
	mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;

	return ret_val;
}

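/*
 * Usage sketch (illustration only, not called from this file): once
 * ixgbe_init_ops_82598() has filled in the ops tables above, bring-up
 * normally proceeds through the function pointers it installed.  The
 * calling context and error handling here are hypothetical.
 *
 *	int32_t error;
 *
 *	error = ixgbe_init_ops_82598(hw);
 *	if (error == IXGBE_SUCCESS)
 *		error = hw->mac.ops.reset_hw(hw);  (ixgbe_reset_hw_82598)
 *	if (error == IXGBE_SUCCESS)
 *		error = hw->mac.ops.start_hw(hw);  (ixgbe_start_hw_82598)
 */
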
/**
 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
 * @hw: pointer to hardware structure
 *
 * Initialize any function pointers that were not able to be
 * set during init_shared_code because the PHY/SFP type was
 * not known. Perform the SFP init if necessary.
 *
 **/
int32_t ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	int32_t ret_val = IXGBE_SUCCESS;
	uint16_t list_offset, data_offset;

	DEBUGFUNC("ixgbe_init_phy_ops_82598");

	/* Identify the PHY */
	phy->ops.identify(hw);

	/* Overwrite the link function pointers if copper PHY */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
		mac->ops.get_link_capabilities =
				&ixgbe_get_copper_link_capabilities_generic;
	}

	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
					&ixgbe_get_phy_firmware_version_tnx;
		break;
	case ixgbe_phy_nl:
		phy->ops.reset = &ixgbe_reset_phy_nl;

		/* Call SFP+ identify routine to get the SFP+ module type */
		ret_val = phy->ops.identify_sfp(hw);
		if (ret_val != IXGBE_SUCCESS)
			goto out;
		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}

		/* Check to see if SFP+ module is supported */
		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
							      &list_offset,
							      &data_offset);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}
		break;
	default:
		break;
	}

out:
	return ret_val;
}

/**
 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware using the generic start_hw function.
 * Disables relaxed ordering, then sets the PCIe completion timeout.
 **/
int32_t ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
	uint32_t regval;
	uint32_t i;
	int32_t ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_hw_82598");

	ret_val = ixgbe_start_hw_generic(hw);

	/* Disable relaxed ordering */
	for (i = 0; ((i < hw->mac.max_tx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
	}

	for (i = 0; ((i < hw->mac.max_rx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

	/* set the completion timeout for interface */
	if (ret_val == IXGBE_SUCCESS)
		ixgbe_set_pcie_completion_timeout(hw);

	return ret_val;
}

/**
 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @autoneg: boolean auto-negotiation value
 *
 * Determines the link capabilities by reading the AUTOC register.
 **/
int32_t ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
					  ixgbe_link_speed *speed,
					  bool *autoneg)
{
	int32_t status = IXGBE_SUCCESS;
	uint32_t autoc = 0;

	DEBUGFUNC("ixgbe_get_link_capabilities_82598");

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If AUTOC value has not been
	 * stored, use the current register value.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		break;
	}

	return status;
}

/**
 * ixgbe_get_media_type_82598 - Determines media type
 * @hw: pointer to hardware structure
 *
 * Returns the media type (fiber, copper, backplane)
 **/
enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
{
	enum ixgbe_media_type media_type;

	DEBUGFUNC("ixgbe_get_media_type_82598");

	/* Detect if there is a copper PHY attached. */
	switch (hw->phy.type) {
	case ixgbe_phy_cu_unknown:
	case ixgbe_phy_tn:
		media_type = ixgbe_media_type_copper;
		goto out;
	default:
		break;
	}

	/* Media type for I82598 is based on device ID */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82598_BX:
		/* Default device ID is mezzanine card KX/KX4 */
		media_type = ixgbe_media_type_backplane;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
	case IXGBE_DEV_ID_82598EB_XF_LR:
	case IXGBE_DEV_ID_82598EB_SFP_LOM:
		media_type = ixgbe_media_type_fiber;
		break;
	case IXGBE_DEV_ID_82598EB_CX4:
	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
		media_type = ixgbe_media_type_cx4;
		break;
	case IXGBE_DEV_ID_82598AT:
	case IXGBE_DEV_ID_82598AT2:
		media_type = ixgbe_media_type_copper;
		break;
	default:
		media_type = ixgbe_media_type_unknown;
		break;
	}
out:
	return media_type;
}

/**
 * ixgbe_fc_enable_82598 - Enable flow control
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to the current settings.
 **/
int32_t ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
{
	int32_t ret_val = IXGBE_SUCCESS;
	uint32_t fctrl_reg;
	uint32_t rmcs_reg;
	uint32_t reg;
	uint32_t fcrtl, fcrth;
	uint32_t link_speed = 0;
	int i;
	bool link_up;

	DEBUGFUNC("ixgbe_fc_enable_82598");

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				DEBUGOUT("Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}

	/*
	 * On 82598 having Rx FC on causes resets while doing 1G
	 * so if it's on turn it off once we know link_speed. For
	 * more details see 82598 Specification update.
	 */
	hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
	if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
		switch (hw->fc.requested_mode) {
		case ixgbe_fc_full:
			hw->fc.requested_mode = ixgbe_fc_tx_pause;
			break;
		case ixgbe_fc_rx_pause:
			hw->fc.requested_mode = ixgbe_fc_none;
			break;
		default:
			/* no change */
			break;
		}
	}

	/* Negotiate the fc mode to use */
	ixgbe_fc_autoneg(hw);

	/* Disable any previous flow control settings */
	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);

	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	default:
		DEBUGOUT("Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	/* Set 802.3x based flow control settings. */
	fctrl_reg |= IXGBE_FCTRL_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);

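	/*
	 * Note (assumption): as in the shared Intel code, the high/low
	 * water marks are taken to be stored in kilobyte units, so the
	 * "<< 10" below scales them to the byte counts the FCRTL/FCRTH
	 * registers expect; the XONE/FCEN bits additionally enable XON
	 * frame generation and the threshold itself.
	 */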
	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
		}

	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}

/**
 * ixgbe_start_mac_link_82598 - Configures MAC link settings
 * @hw: pointer to hardware structure
 *
 * Configures link settings based on values in the ixgbe_hw struct.
 * Restarts the link.  Performs autonegotiation if needed.
 **/
int32_t ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
				   bool autoneg_wait_to_complete)
{
	uint32_t autoc_reg;
	uint32_t links_reg;
	uint32_t i;
	int32_t status = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_mac_link_82598");

	/* Restart link */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autonegotiation did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noises during initial link setup */
	msec_delay(50);

	return status;
}

/**
 * ixgbe_validate_link_ready - Function looks for phy link
 * @hw: pointer to hardware structure
 *
 * Function indicates success when phy link is available. If phy is not ready
 * within 5 seconds of MAC indicating link, the function returns error.
 **/
int32_t ixgbe_validate_link_ready(struct ixgbe_hw *hw)
{
	uint32_t timeout;
	uint16_t an_reg;

	if (hw->device_id != IXGBE_DEV_ID_82598AT2)
		return IXGBE_SUCCESS;

	for (timeout = 0;
	     timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);

		if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
		    (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
			break;

		msec_delay(100);
	}

	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
		DEBUGOUT("Link was indicated but link is down\n");
		return IXGBE_ERR_LINK_SETUP;
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_check_mac_link_82598 - Get link/speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: TRUE if link is up, FALSE otherwise
 * @link_up_wait_to_complete: bool used to wait for link up or not
 *
 * Reads the links register to determine if link is up and the current speed
 **/
int32_t ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
				   ixgbe_link_speed *speed, bool *link_up,
				   bool link_up_wait_to_complete)
{
	uint32_t links_reg;
	uint32_t i;
	uint16_t link_reg, adapt_comp_reg;

	DEBUGFUNC("ixgbe_check_mac_link_82598");

	/*
	 * SERDES PHY requires us to read link status from undocumented
	 * register 0xC79F.  Bit 0 set indicates link is up/ready; clear
	 * indicates link down.  0xC00C is read to check that the XAUI lanes
	 * are active.  Bit 0 clear indicates active; set indicates inactive.
	 */
	if (hw->phy.type == ixgbe_phy_nl) {
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
				     &adapt_comp_reg);
		if (link_up_wait_to_complete) {
			for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
				if ((link_reg & 1) &&
				    ((adapt_comp_reg & 1) == 0)) {
					*link_up = TRUE;
					break;
				} else {
					*link_up = FALSE;
				}
				msec_delay(100);
				hw->phy.ops.read_reg(hw, 0xC79F,
						     IXGBE_TWINAX_DEV,
						     &link_reg);
				hw->phy.ops.read_reg(hw, 0xC00C,
						     IXGBE_TWINAX_DEV,
						     &adapt_comp_reg);
			}
		} else {
			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
				*link_up = TRUE;
			else
				*link_up = FALSE;
		}

		if (*link_up == FALSE)
			goto out;
	}

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if (link_up_wait_to_complete) {
		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = TRUE;
				break;
			} else {
				*link_up = FALSE;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = TRUE;
		else
			*link_up = FALSE;
	}

	if (links_reg & IXGBE_LINKS_SPEED)
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
	else
		*speed = IXGBE_LINK_SPEED_1GB_FULL;

	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
	    (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
		*link_up = FALSE;

out:
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_setup_mac_link_82598 - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 **/
int32_t ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
				   ixgbe_link_speed speed,
				   bool autoneg_wait_to_complete)
{
	bool autoneg = FALSE;
	int32_t status = IXGBE_SUCCESS;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
	uint32_t curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	uint32_t autoc = curr_autoc;
	uint32_t link_mode = autoc & IXGBE_AUTOC_LMS_MASK;

	DEBUGFUNC("ixgbe_setup_mac_link_82598");

	/* Check to see if speed passed in is supported. */
	ixgbe_get_link_capabilities_82598(hw, &link_capabilities, &autoneg);
	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
		status = IXGBE_ERR_LINK_SETUP;

	/* Set KX4/KX support according to speed requested */
	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
		 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
			autoc |= IXGBE_AUTOC_KX4_SUPP;
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
		if (autoc != curr_autoc)
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	}

	if (status == IXGBE_SUCCESS) {
		/*
		 * Setup and restart the link based on the new values in
		 * ixgbe_hw.  This will write the AUTOC register based on
		 * the new stored values.
		 */
		status = ixgbe_start_mac_link_82598(hw,
						    autoneg_wait_to_complete);
	}

	return status;
}

/**
 * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
 *
 * Sets the link speed in the AUTOC register in the MAC and restarts link.
 **/
int32_t ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed,
				      bool autoneg_wait_to_complete)
{
	int32_t status;

	DEBUGFUNC("ixgbe_setup_copper_link_82598");

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed,
					      autoneg_wait_to_complete);
	/* Set up MAC */
	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);

	return status;
}

/**
 * ixgbe_reset_hw_82598 - Performs hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks and
 * clears all interrupts, performing a PHY reset, and performing a link (MAC)
 * reset.
 **/
int32_t ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
	int32_t status = IXGBE_SUCCESS;
	int32_t phy_status = IXGBE_SUCCESS;
	uint32_t ctrl;
	uint32_t gheccr;
	uint32_t i;
	uint32_t autoc;
	uint8_t analog_val;

	DEBUGFUNC("ixgbe_reset_hw_82598");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/*
	 * Power up the Atlas Tx lanes if they are currently powered down.
	 * Atlas Tx lanes are powered down for MAC loopback tests, but
	 * they are not automatically restored on reset.
	 */
	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
		/* Enable Tx Atlas so packets can be transmitted again */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					      analog_val);
	}

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE) {
		/* PHY ops must be identified and initialized prior to reset */

		/* Init PHY and function pointers, perform SFP setup */
		phy_status = hw->phy.ops.init(hw);
		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
			goto reset_hw_out;
		if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
			goto mac_reset_top;

		hw->phy.ops.reset(hw);
	}

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  This needs to be a SW reset.
	 * If link reset is used, it might reset the MAC when mng is using it
	 */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST))
			break;
	}
	if (ctrl & IXGBE_CTRL_RST) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);

	/*
	 * Store the original AUTOC value if it has not been
	 * stored off yet.  Otherwise restore the stored original
	 * AUTOC value since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_link_settings_stored = TRUE;
	} else if (autoc != hw->mac.orig_autoc) {
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table
	 */
	hw->mac.ops.init_rx_addrs(hw);

reset_hw_out:
	if (phy_status != IXGBE_SUCCESS)
		status = phy_status;

	return status;
}

/**
 * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to associate with a VMDq index
 * @vmdq: VMDq set index
 **/
int32_t ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
{
	uint32_t rar_high;
	uint32_t rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_set_vmdq_82598");

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		DEBUGOUT1("RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	rar_high &= ~IXGBE_RAH_VIND_MASK;
	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to associate with a VMDq index
 * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
 **/
int32_t ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
{
	uint32_t rar_high;
	uint32_t rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		DEBUGOUT1("RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	if (rar_high & IXGBE_RAH_VIND_MASK) {
		rar_high &= ~IXGBE_RAH_VIND_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
	}

	return IXGBE_SUCCESS;
}

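/*
 * Worked example (illustration only) of the index math used by
 * ixgbe_set_vfta_82598() below: for VLAN 100 (0x064), regindex is
 * 100 >> 5 = 3 and the membership bit is bit (100 & 0x1F) = 4 of VFTA[3];
 * the 4-bit VMDq pool index lives in byte array (100 >> 3) & 0x3 = 0 at
 * nibble offset (100 & 0x7) << 2 = 16, i.e. bits 19:16 of VFTAVIND(0, 3).
 */
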
/**
 * ixgbe_set_vfta_82598 - Set VLAN filter table
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VFTA
 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
 *
 * Turn on/off specified VLAN in the VLAN filter table.
 **/
int32_t ixgbe_set_vfta_82598(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind,
			     bool vlan_on)
{
	uint32_t regindex;
	uint32_t bitindex;
	uint32_t bits;
	uint32_t vftabyte;

	DEBUGFUNC("ixgbe_set_vfta_82598");

	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/* Determine 32-bit word position in array */
	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */

	/* Determine the location of the (VMD) queue index */
	vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */

	/* Set the nibble for VMD queue index */
	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
	bits &= (~(0x0F << bitindex));
	bits |= (vind << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);

	/* Determine the location of the bit for this VLAN id */
	bitindex = vlan & 0x1F;   /* lower five bits */

	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
	if (vlan_on)
		/* Turn on this VLAN id */
		bits |= (1 << bitindex);
	else
		/* Turn off this VLAN id */
		bits &= ~(1 << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_clear_vfta_82598 - Clear VLAN filter table
 * @hw: pointer to hardware structure
 *
 * Clears the VLAN filter table, and the VMDq index associated with the filter
 **/
int32_t ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
{
	uint32_t offset;
	uint32_t vlanbyte;

	DEBUGFUNC("ixgbe_clear_vfta_82598");

	for (offset = 0; offset < hw->mac.vft_size; offset++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);

	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
		for (offset = 0; offset < hw->mac.vft_size; offset++)
			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
					0);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
 * @hw: pointer to hardware structure
 * @reg: analog register to read
 * @val: read value
 *
 * Performs read operation to Atlas analog register specified.
 **/
int32_t ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, uint32_t reg, uint8_t *val)
{
	uint32_t atlas_ctl;

	DEBUGFUNC("ixgbe_read_analog_reg8_82598");

	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
			IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);
	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
	*val = (uint8_t)atlas_ctl;

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
 * @hw: pointer to hardware structure
 * @reg: atlas register to write
 * @val: value to write
 *
 * Performs write operation to Atlas analog register specified.
 **/
int32_t ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, uint32_t reg, uint8_t val)
{
	uint32_t atlas_ctl;

	DEBUGFUNC("ixgbe_write_analog_reg8_82598");

	atlas_ctl = (reg << 8) | val;
	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);

	return IXGBE_SUCCESS;
}

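/*
 * Usage sketch (illustration only, not called from this file): reading the
 * SFF identifier byte of an attached SFP+ module through the EEPROM wrapper
 * below.  IXGBE_SFF_IDENTIFIER is assumed to be the usual offset-0
 * definition from ixgbe_phy.h.
 *
 *	uint8_t id;
 *
 *	if (ixgbe_read_i2c_eeprom_82598(hw, IXGBE_SFF_IDENTIFIER, &id) ==
 *	    IXGBE_SUCCESS)
 *		DEBUGOUT1("SFP+ identifier: 0x%02x\n", id);
 */
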
/**
 * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
 * @hw: pointer to hardware structure
 * @dev_addr: address to read from
 * @byte_offset: byte offset to read from dev_addr
 * @eeprom_data: value read
 *
 * Performs 8 bit read operation to SFP module's EEPROM over I2C interface.
 **/
int32_t ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, uint8_t dev_addr,
				 uint8_t byte_offset, uint8_t *eeprom_data)
{
	int32_t status = IXGBE_SUCCESS;
	uint16_t sfp_addr = 0;
	uint16_t sfp_data = 0;
	uint16_t sfp_stat = 0;
	uint16_t gssr;
	uint32_t i;

	DEBUGFUNC("ixgbe_read_i2c_phy_82598");

	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
		gssr = IXGBE_GSSR_PHY1_SM;
	else
		gssr = IXGBE_GSSR_PHY0_SM;

	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
		return IXGBE_ERR_SWFW_SYNC;

	if (hw->phy.type == ixgbe_phy_nl) {
		/*
		 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
		 * 0xC30D.  These registers are used to talk to the SFP+
		 * module's EEPROM through the SDA/SCL (I2C) interface.
		 */
		sfp_addr = (dev_addr << 8) + byte_offset;
		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
		hw->phy.ops.write_reg_mdi(hw,
					  IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
					  IXGBE_MDIO_PMA_PMD_DEV_TYPE,
					  sfp_addr);

		/* Poll status */
		for (i = 0; i < 100; i++) {
			hw->phy.ops.read_reg_mdi(hw,
						 IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
						 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
						 &sfp_stat);
			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
				break;
			msec_delay(10);
		}

		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
			DEBUGOUT("EEPROM read did not pass.\n");
			status = IXGBE_ERR_SFP_NOT_PRESENT;
			goto out;
		}

		/* Read data */
		hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
					 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
					 &sfp_data);

		*eeprom_data = (uint8_t)(sfp_data >> 8);
	} else {
		status = IXGBE_ERR_PHY;
	}

out:
	hw->mac.ops.release_swfw_sync(hw, gssr);
	return status;
}

/**
 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
 * @hw: pointer to hardware structure
 * @byte_offset: EEPROM byte offset to read
 * @eeprom_data: value read
 *
 * Performs 8 bit read operation to SFP module's EEPROM over I2C interface.
 **/
int32_t ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, uint8_t byte_offset,
				    uint8_t *eeprom_data)
{
	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
					byte_offset, eeprom_data);
}

/**
 * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
 * @hw: pointer to hardware structure
 *
 * Determines physical layer capabilities of the current configuration.
 **/
uint32_t ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
{
	uint32_t physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	uint32_t autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	uint32_t pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
	uint32_t pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	uint16_t ext_ability = 0;

	DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");

	hw->phy.ops.identify(hw);

	/* Copper PHY must be checked before AUTOC LMS to determine correct
	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_cu_unknown:
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
				     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
		goto out;
	default:
		break;
	}

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_AN:
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		else
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
		break;
	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		else { /* XAUI */
			if (autoc & IXGBE_AUTOC_KX_SUPP)
				physical_layer |=
				    IXGBE_PHYSICAL_LAYER_1000BASE_KX;
			if (autoc & IXGBE_AUTOC_KX4_SUPP)
				physical_layer |=
				    IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		}
		break;
	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		break;
	default:
		break;
	}

	if (hw->phy.type == ixgbe_phy_nl) {
		hw->phy.ops.identify_sfp(hw);

		switch (hw->phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
			break;
		case ixgbe_sfp_type_sr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
			break;
		case ixgbe_sfp_type_lr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
			break;
		default:
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
			break;
		}
	}

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
		break;
	case IXGBE_DEV_ID_82598EB_XF_LR:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
		break;
	default:
		break;
	}

out:
	return physical_layer;
}

/**
 * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
 * port devices.
 * @hw: pointer to the HW structure
 *
 * Calls common function and corrects issue with some single port devices
 * that enable LAN1 but not LAN0.
 **/
void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	uint16_t pci_gen = 0;
	uint16_t pci_ctrl2 = 0;

	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");

	ixgbe_set_lan_id_multi_port_pcie(hw);

	/* check if LAN0 is disabled */
	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
	if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {

		hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);

		/* if LAN0 is completely disabled force function to 0 */
		if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {

			bus->func = 0;
		}
	}
}

/**
 * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
 * @hw: pointer to hardware structure
 *
 **/
void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
{
	uint32_t regval;
	uint32_t i;

	DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");

	/* Enable relaxed ordering */
	for (i = 0; ((i < hw->mac.max_tx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
	}

	for (i = 0; ((i < hw->mac.max_rx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

}