1 /* $OpenBSD: ixgbe_82599.c,v 1.3 2011/06/10 12:46:35 claudio Exp $ */ 2 3 /****************************************************************************** 4 5 Copyright (c) 2001-2009, Intel Corporation 6 All rights reserved. 7 8 Redistribution and use in source and binary forms, with or without 9 modification, are permitted provided that the following conditions are met: 10 11 1. Redistributions of source code must retain the above copyright notice, 12 this list of conditions and the following disclaimer. 13 14 2. Redistributions in binary form must reproduce the above copyright 15 notice, this list of conditions and the following disclaimer in the 16 documentation and/or other materials provided with the distribution. 17 18 3. Neither the name of the Intel Corporation nor the names of its 19 contributors may be used to endorse or promote products derived from 20 this software without specific prior written permission. 21 22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 POSSIBILITY OF SUCH DAMAGE. 
33 34 ******************************************************************************/ 35 /*$FreeBSD: src/sys/dev/ixgbe/ixgbe_82599.c,v 1.3 2009/12/07 21:30:54 jfv Exp $*/ 36 37 #include <dev/pci/ixgbe.h> 38 #include <dev/pci/ixgbe_type.h> 39 40 int32_t ixgbe_init_ops_82599(struct ixgbe_hw *hw); 41 int32_t ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, 42 ixgbe_link_speed *speed, 43 int *autoneg); 44 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw); 45 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); 46 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); 47 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); 48 int32_t ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, 49 ixgbe_link_speed speed, int autoneg, 50 int autoneg_wait_to_complete); 51 int32_t ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, 52 ixgbe_link_speed speed, int autoneg, 53 int autoneg_wait_to_complete); 54 int32_t ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, 55 int autoneg_wait_to_complete); 56 int32_t ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, 57 ixgbe_link_speed speed, 58 int autoneg, 59 int autoneg_wait_to_complete); 60 int32_t ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, 61 ixgbe_link_speed speed, 62 int autoneg, 63 int autoneg_wait_to_complete); 64 int32_t ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw); 65 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw); 66 int32_t ixgbe_reset_hw_82599(struct ixgbe_hw *hw); 67 int32_t ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, uint32_t reg, uint8_t *val); 68 int32_t ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, uint32_t reg, uint8_t val); 69 int32_t ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw); 70 int32_t ixgbe_identify_phy_82599(struct ixgbe_hw *hw); 71 int32_t ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw); 72 uint32_t ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw); 73 int32_t ixgbe_enable_rx_dma_82599(struct ixgbe_hw 
*hw, uint32_t regval);
int32_t ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
int ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);

uint32_t ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
					  union ixgbe_atr_hash_dword common);
int32_t ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
					      union ixgbe_atr_hash_dword input,
					      union ixgbe_atr_hash_dword common,
					      uint8_t queue);
uint32_t ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks);

/**
 * ixgbe_init_mac_link_ops_82599 - Set up MAC link function pointers
 * @hw: pointer to hardware structure
 *
 * Selects the laser-control handlers (fiber only) and the setup_link
 * handler (multispeed fiber, SmartSpeed backplane, or plain 82599)
 * based on the media type and SFP module detected on this port.
 **/
void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");

	/* enable the laser control functions for SFP+ fiber */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
		mac->ops.disable_tx_laser =
		    &ixgbe_disable_tx_laser_multispeed_fiber;
		mac->ops.enable_tx_laser =
		    &ixgbe_enable_tx_laser_multispeed_fiber;
		mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;

	} else {
		/* non-fiber media has no laser to control */
		mac->ops.disable_tx_laser = NULL;
		mac->ops.enable_tx_laser = NULL;
		mac->ops.flap_tx_laser = NULL;
	}

	if (hw->phy.multispeed_fiber) {
		/* Set up dual speed SFP+ support */
		mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
	} else {
		/*
		 * SmartSpeed is only used on backplane when requested
		 * (auto/on) and when the LESM firmware is not running,
		 * since LESM manages the link itself.
		 */
		if ((ixgbe_hw0(hw, get_media_type) == ixgbe_media_type_backplane) &&
		    (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
		     hw->phy.smart_speed == ixgbe_smart_speed_on) &&
		    !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
			mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
		} else {
			mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
		}
	}
}

/**
 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
 * @hw: pointer to hardware structure
 *
 * Initialize any function pointers that were not able to be
 * set during init_shared_code because the PHY/SFP type was
 * not known.  Perform the SFP init if necessary.
 *
 **/
int32_t ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	int32_t ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_init_phy_ops_82599");

	/* Identify the PHY or SFP module */
	ret_val = phy->ops.identify(hw);
	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto init_phy_ops_out;

	/* Setup function pointers based on detected SFP module and speeds */
	ixgbe_init_mac_link_ops_82599(hw);
	/* once the SFP is known there is nothing for a PHY reset to do */
	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
		hw->phy.ops.reset = NULL;

	/* If copper media, overwrite with copper function pointers */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
		mac->ops.get_link_capabilities =
		    &ixgbe_get_copper_link_capabilities_generic;
	}

	/* Set necessary function pointers based on phy type */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
		    &ixgbe_get_phy_firmware_version_tnx;
		break;
	case ixgbe_phy_aq:
		phy->ops.get_firmware_version =
		    &ixgbe_get_phy_firmware_version_generic;
		break;
	default:
		break;
	}
init_phy_ops_out:
	return ret_val;
}

/**
 * ixgbe_setup_sfp_modules_82599 - Setup SFP module
 * @hw: pointer to hardware structure
 *
 * Streams the SFP+ init sequence from the EEPROM into the CORECTL
 * register (under the MAC_CSR software/firmware semaphore), then
 * restarts the DSP and waits for autoneg to leave state 0 before
 * switching the link mode back to 10G SFI.
 **/
int32_t ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
	int32_t ret_val = IXGBE_SUCCESS;
	uint32_t reg_anlp1 = 0;
	uint32_t i = 0;
	uint16_t list_offset, data_offset, data_value;

	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");

	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
		ixgbe_init_mac_link_ops_82599(hw);

		hw->phy.ops.reset = NULL;

		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
		    &data_offset);
		if (ret_val != IXGBE_SUCCESS)
			goto setup_sfp_out;

		/* PHY config will finish before releasing the semaphore
		 */
		ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto setup_sfp_out;
		}

		/* sequence is terminated by an 0xffff word in the EEPROM */
		hw->eeprom.ops.read(hw, ++data_offset, &data_value);
		while (data_value != 0xffff) {
			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
			IXGBE_WRITE_FLUSH(hw);
			hw->eeprom.ops.read(hw, ++data_offset, &data_value);
		}

		/* Release the semaphore */
		ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		/* Delay obtaining semaphore again to allow FW access */
		msec_delay(hw->eeprom.semaphore_delay);

		/* Now restart DSP by setting Restart_AN and clearing LMS */
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
		    IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
		    IXGBE_AUTOC_AN_RESTART));

		/* Wait for AN to leave state 0 (up to 10 x 4ms) */
		for (i = 0; i < 10; i++) {
			msec_delay(4);
			reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
			if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
				break;
		}
		if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
			DEBUGOUT("sfp module setup not complete\n");
			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
			goto setup_sfp_out;
		}

		/* Restart DSP by setting Restart_AN and return to SFI mode */
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
		    IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
		    IXGBE_AUTOC_AN_RESTART));
	}

setup_sfp_out:
	return ret_val;
}

/**
 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
 * @hw: pointer to hardware structure
 *
 * Initialize the function pointers and assign the MAC type for 82599.
 * Does not touch the hardware.
 **/

int32_t ixgbe_init_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	int32_t ret_val;

	DEBUGFUNC("ixgbe_init_ops_82599");

	/*
	 * NOTE(review): the return value of the first call is overwritten
	 * by the second; only the result of ixgbe_init_ops_generic() is
	 * propagated to the caller — confirm this is intentional.
	 */
	ret_val = ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.identify = &ixgbe_identify_phy_82599;
	phy->ops.init = &ixgbe_init_phy_ops_82599;

	/* MAC */
	mac->ops.reset_hw = &ixgbe_reset_hw_82599;
	mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
	mac->ops.get_media_type = &ixgbe_get_media_type_82599;
	mac->ops.get_supported_physical_layer =
	    &ixgbe_get_supported_physical_layer_82599;
	mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
	mac->ops.start_hw = &ixgbe_start_hw_rev_1_82599;

	mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
#if 0
	mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
	mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
	mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
	mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
#endif

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
	mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
	mac->rar_highwater = 1;
	mac->ops.set_vfta = &ixgbe_set_vfta_generic;
	mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
	mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
	mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
#if 0
	mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
	mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
#endif

	/* Link */
	mac->ops.get_link_capabilities =
	    &ixgbe_get_link_capabilities_82599;
	mac->ops.check_link = &ixgbe_check_mac_link_generic;
	ixgbe_init_mac_link_ops_82599(hw);

	/* 82599 table sizes and queue counts */
	mac->mcft_size = 128;
	mac->vft_size = 128;
	mac->num_rar_entries = 128;
	mac->rx_pb_size = 512;
	mac->max_tx_queues = 128;
	mac->max_rx_queues = 128;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);

	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;

	return ret_val;
}

/**
 * ixgbe_get_link_capabilities_82599 - Determines link capabilities
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @negotiation: TRUE when autoneg or autotry is enabled
 *
 * Determines the link capabilities by reading the AUTOC register.
 **/
int32_t ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
					  ixgbe_link_speed *speed,
					  int *negotiation)
{
	int32_t status = IXGBE_SUCCESS;
	uint32_t autoc = 0;

	DEBUGFUNC("ixgbe_get_link_capabilities_82599");

	/* Check if 1G SFP module. */
	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) {
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		goto out;
	}

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If AUTOC value has not
	 * been stored, use the current register values.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = FALSE;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*negotiation = FALSE;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		break;

	case IXGBE_AUTOC_LMS_10G_SERIAL:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*negotiation = FALSE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR:
	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		/* KR and KX4 each advertise 10G; KX advertises 1G */
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		break;

	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
		*negotiation = FALSE;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
		break;	/* not reached */
	}

	/* a multispeed fiber module always allows 1G/10G autotry */
	if (hw->phy.multispeed_fiber) {
		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
		    IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
	}

out:
	return status;
}

/**
 * ixgbe_get_media_type_82599 - Get media type
 * @hw: pointer to hardware structure
 *
 * Returns the media type (fiber, copper, backplane)
 **/
enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) 420 { 421 enum ixgbe_media_type media_type; 422 423 DEBUGFUNC("ixgbe_get_media_type_82599"); 424 425 /* Detect if there is a copper PHY attached. */ 426 switch (hw->phy.type) { 427 case ixgbe_phy_cu_unknown: 428 case ixgbe_phy_tn: 429 case ixgbe_phy_aq: 430 media_type = ixgbe_media_type_copper; 431 goto out; 432 default: 433 break; 434 } 435 436 switch (hw->device_id) { 437 case IXGBE_DEV_ID_82599_KX4: 438 case IXGBE_DEV_ID_82599_KX4_MEZZ: 439 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: 440 case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: 441 case IXGBE_DEV_ID_82599_XAUI_LOM: 442 /* Default device ID is mezzanine card KX/KX4 */ 443 media_type = ixgbe_media_type_backplane; 444 break; 445 case IXGBE_DEV_ID_82599_SFP: 446 case IXGBE_DEV_ID_82599_SFP_FCOE: 447 media_type = ixgbe_media_type_fiber; 448 break; 449 case IXGBE_DEV_ID_82599_CX4: 450 media_type = ixgbe_media_type_cx4; 451 break; 452 case IXGBE_DEV_ID_82599_T3_LOM: 453 media_type = ixgbe_media_type_copper; 454 break; 455 default: 456 media_type = ixgbe_media_type_unknown; 457 break; 458 } 459 out: 460 return media_type; 461 } 462 463 /** 464 * ixgbe_start_mac_link_82599 - Setup MAC link settings 465 * @hw: pointer to hardware structure 466 * 467 * Configures link settings based on values in the ixgbe_hw struct. 468 * Restarts the link. Performs autonegotiation if needed. 
469 **/ 470 int32_t ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, 471 int autoneg_wait_to_complete) 472 { 473 uint32_t autoc_reg; 474 uint32_t links_reg; 475 uint32_t i; 476 int32_t status = IXGBE_SUCCESS; 477 478 DEBUGFUNC("ixgbe_start_mac_link_82599"); 479 480 481 /* Restart link */ 482 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 483 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 484 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 485 486 /* Only poll for autoneg to complete if specified to do so */ 487 if (autoneg_wait_to_complete) { 488 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == 489 IXGBE_AUTOC_LMS_KX4_KX_KR || 490 (autoc_reg & IXGBE_AUTOC_LMS_MASK) == 491 IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || 492 (autoc_reg & IXGBE_AUTOC_LMS_MASK) == 493 IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { 494 links_reg = 0; /* Just in case Autoneg time = 0 */ 495 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { 496 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 497 if (links_reg & IXGBE_LINKS_KX_AN_COMP) 498 break; 499 msec_delay(100); 500 } 501 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { 502 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; 503 DEBUGOUT("Autoneg did not complete.\n"); 504 } 505 } 506 } 507 508 /* Add delay to filter out noises during initial link setup */ 509 msec_delay(50); 510 511 return status; 512 } 513 514 /** 515 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser 516 * @hw: pointer to hardware structure 517 * 518 * The base drivers may require better control over SFP+ module 519 * PHY states. This includes selectively shutting down the Tx 520 * laser on the PHY, effectively halting physical link. 
521 **/ 522 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 523 { 524 uint32_t esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 525 526 /* Disable tx laser; allow 100us to go dark per spec */ 527 esdp_reg |= IXGBE_ESDP_SDP3; 528 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); 529 IXGBE_WRITE_FLUSH(hw); 530 usec_delay(100); 531 } 532 533 /** 534 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser 535 * @hw: pointer to hardware structure 536 * 537 * The base drivers may require better control over SFP+ module 538 * PHY states. This includes selectively turning on the Tx 539 * laser on the PHY, effectively starting physical link. 540 **/ 541 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 542 { 543 uint32_t esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 544 545 /* Enable tx laser; allow 100ms to light up */ 546 esdp_reg &= ~IXGBE_ESDP_SDP3; 547 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); 548 IXGBE_WRITE_FLUSH(hw); 549 msec_delay(100); 550 } 551 552 /** 553 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser 554 * @hw: pointer to hardware structure 555 * 556 * When the driver changes the link speeds that it can support, 557 * it sets autotry_restart to TRUE to indicate that we need to 558 * initiate a new autotry session with the link partner. To do 559 * so, we set the speed then disable and re-enable the tx laser, to 560 * alert the link partner that it also needs to restart autotry on its 561 * end. This is consistent with TRUE clause 37 autoneg, which also 562 * involves a loss of signal. 
 **/
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");

	/*
	 * Momentarily dropping the laser loses signal, which prods the
	 * link partner into restarting its autotry session.
	 */
	if (hw->mac.autotry_restart) {
		ixgbe_disable_tx_laser_multispeed_fiber(hw);
		ixgbe_enable_tx_laser_multispeed_fiber(hw);
		hw->mac.autotry_restart = FALSE;
	}
}

/**
 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: TRUE if autonegotiation enabled
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 **/
int32_t ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
					      ixgbe_link_speed speed, int autoneg,
					      int autoneg_wait_to_complete)
{
	int32_t status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	uint32_t speedcnt = 0;
	uint32_t esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
	uint32_t i = 0;
	int link_up = FALSE;
	int negotiation;

	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Mask off requested but non-supported speeds */
	status = ixgbe_hw(hw, get_link_capabilities, &link_speed, &negotiation);
	if (status != IXGBE_SUCCESS)
		return status;

	speed &= link_speed;

	/*
	 * Try each speed one by one, highest priority first.  We do this in
	 * software because 10gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_hw(hw, check_link, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
			goto out;

		/* Set the module link speed (SDP5 high selects 10G) */
		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
		IXGBE_WRITE_FLUSH(hw);

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link_82599(hw,
						    IXGBE_LINK_SPEED_10GB_FULL,
						    autoneg,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the tx laser if it has not already been done */
		ixgbe_hw(hw, flap_tx_laser);

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted.  82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_hw(hw, check_link, &link_speed,
			    &link_up, FALSE);
			if (status != IXGBE_SUCCESS)
				return status;

			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_hw(hw, check_link, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
			goto out;

		/* Set the module link speed (SDP5 low selects 1G) */
		esdp_reg &= ~IXGBE_ESDP_SDP5;
		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
		IXGBE_WRITE_FLUSH(hw);

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link_82599(hw,
						    IXGBE_LINK_SPEED_1GB_FULL,
						    autoneg,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the tx laser if it has not already been done */
		ixgbe_hw(hw, flap_tx_laser);

		/* Wait for the link partner to also set speed */
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_hw(hw, check_link, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if (link_up)
			goto out;
	}

	/*
	 * We didn't get link.  Configure back to the highest speed we tried,
	 * (if there was more than one).  We call ourselves back with just the
	 * single highest speed that the user requested.
	 * (Recursion terminates: the recursive call carries one speed bit,
	 * so its speedcnt cannot exceed 1.)
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
		    highest_link_speed, autoneg, autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}

/**
 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: TRUE if autonegotiation enabled
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Implements the Intel SmartSpeed algorithm.
 **/
int32_t ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
					ixgbe_link_speed speed, int autoneg,
					int autoneg_wait_to_complete)
{
	int32_t status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed;
	int32_t i, j;
	int link_up = FALSE;
	uint32_t autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");

	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	if (speed & IXGBE_LINK_SPEED_100_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
	 * autoneg advertisement if link is unable to be established at the
	 * highest negotiated rate.  This can sometimes happen due to integrity
	 * issues with the physical media connection.
	 */

	/* First, try to get link with full advertisement */
	hw->phy.smart_speed_active = FALSE;
	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
		status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
		    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			goto out;

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
		 * Table 9 in the AN MAS.
		 */
		for (i = 0; i < 5; i++) {
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_hw(hw, check_link, &link_speed, &link_up,
			    FALSE);
			if (status != IXGBE_SUCCESS)
				goto out;

			if (link_up)
				goto out;
		}
	}

	/*
	 * We didn't get link.  If we advertised KR plus one of KX4/KX
	 * (or BX4/BX), then disable KR and try again.
	 */
	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
		goto out;

	/* Turn SmartSpeed on to disable KR support */
	hw->phy.smart_speed_active = TRUE;
	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
	    autoneg_wait_to_complete);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * Wait for the controller to acquire link.  600ms will allow for
	 * the AN link_fail_inhibit_timer as well for multiple cycles of
	 * parallel detect, both 10g and 1g.  This allows for the maximum
	 * connect attempts as defined in the AN MAS table 73-7.
	 */
	for (i = 0; i < 6; i++) {
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_hw(hw, check_link, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Turn SmartSpeed back off. */
	hw->phy.smart_speed_active = FALSE;
	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
	    autoneg_wait_to_complete);

out:
	/* link_up guards link_speed: it is only TRUE after a check_link
	 * call has filled link_speed in. */
	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
		DEBUGOUT("Smartspeed has downgraded the link speed "
		    "from the maximum advertised\n");
	return status;
}

/**
 * ixgbe_setup_mac_link_82599 - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: TRUE if autonegotiation enabled
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 **/
int32_t ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
				   ixgbe_link_speed speed, int autoneg,
				   int autoneg_wait_to_complete)
{
	int32_t status = IXGBE_SUCCESS;
	uint32_t autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	uint32_t autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	uint32_t start_autoc = autoc;
	uint32_t orig_autoc = 0;
	uint32_t link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
	uint32_t pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	uint32_t pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	uint32_t links_reg;
	uint32_t i;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;

	DEBUGFUNC("ixgbe_setup_mac_link_82599");

	/* Check to see if speed passed in is supported.
	 */
	status = ixgbe_hw(hw, get_link_capabilities, &link_capabilities, &autoneg);
	if (status != IXGBE_SUCCESS)
		goto out;

	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
	}

	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
	if (hw->mac.orig_link_settings_stored)
		orig_autoc = hw->mac.orig_autoc;
	else
		orig_autoc = autoc;

	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
		/* Set KX4/KX/KR support according to speed requested */
		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
				autoc |= IXGBE_AUTOC_KX4_SUPP;
		/*
		 * NOTE(review): no braces above, so this KR test is NOT
		 * nested under the 10GB-requested check — KR support is
		 * re-enabled regardless of the requested speed.  Later
		 * upstream versions brace both tests under the 10GB check;
		 * confirm intended behavior before changing.
		 */
		if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
		    (hw->phy.smart_speed_active == FALSE))
			autoc |= IXGBE_AUTOC_KR_SUPP;
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
	    (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
	     link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
		/* Switch from 1G SFI to 10G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
		}
	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
	    (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
		/* Switch from 10G SFI to 1G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			if (autoneg)
				autoc |= IXGBE_AUTOC_LMS_1G_AN;
			else
				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
		}
	}

	/* Only touch the hardware if the computed AUTOC value changed */
	if (autoc != start_autoc) {
		/* Restart link */
		autoc |= IXGBE_AUTOC_AN_RESTART;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);

		/* Only poll for autoneg to complete if specified to do so */
		if (autoneg_wait_to_complete) {
			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
				links_reg = 0; /*Just in case Autoneg time=0*/
				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
					links_reg =
					    IXGBE_READ_REG(hw, IXGBE_LINKS);
					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
						break;
					msec_delay(100);
				}
				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
					status =
					    IXGBE_ERR_AUTONEG_NOT_COMPLETE;
					DEBUGOUT("Autoneg did not complete.\n");
				}
			}
		}

		/* Add delay to filter out noises during initial link setup */
		msec_delay(50);
	}

out:
	return status;
}

/**
 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: TRUE if autonegotiation enabled
 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
 *
 * Restarts link on PHY and MAC based on settings passed in.
 **/
int32_t ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed,
				      int autoneg,
				      int autoneg_wait_to_complete)
{
	int32_t status;

	DEBUGFUNC("ixgbe_setup_copper_link_82599");

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
	    autoneg_wait_to_complete);
	/* Set up MAC; its return value is intentionally ignored —
	 * the PHY setup status is what gets reported. */
	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);

	return status;
}
/**
 * ixgbe_reset_hw_82599 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
 * reset.
 **/
int32_t ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
	int32_t status = IXGBE_SUCCESS;
	uint32_t ctrl;
	uint32_t i;
	uint32_t autoc;
	uint32_t autoc2;

	DEBUGFUNC("ixgbe_reset_hw_82599");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	hw->mac.ops.stop_adapter(hw);

	/* PHY ops must be identified and initialized prior to reset */

	/* Identify PHY and related function pointers */
	status = hw->phy.ops.init(hw);

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Setup SFP module if there is one present. */
	if (hw->phy.sfp_setup_needed) {
		status = hw->mac.ops.setup_sfp(hw);
		hw->phy.sfp_setup_needed = FALSE;
	}

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Reset PHY (skipped when disabled or when no reset op is set) */
	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
		hw->phy.ops.reset(hw);

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests before reset
	 */
	ixgbe_disable_pcie_master(hw);

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  This needs to be a SW reset.
	 * If link reset is used, it might reset the MAC when mng is using it
	 */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
	IXGBE_WRITE_FLUSH(hw);

	/*
	 * Poll for reset bit to self-clear indicating reset is complete;
	 * up to 10 polls of 1us each.
	 */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST))
			break;
	}
	if (ctrl & IXGBE_CTRL_RST) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.  We use 1usec since that is
	 * what is needed for ixgbe_disable_pcie_master().  The second reset
	 * then clears out any effects of those events.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		usec_delay(1);
		goto mac_reset_top;
	}

	msec_delay(50);

	/*
	 * Store the original AUTOC/AUTOC2 values if they have not been
	 * stored off yet.  Otherwise restore the stored original
	 * values since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_autoc2 = autoc2;
		hw->mac.orig_link_settings_stored = TRUE;
	} else {
		if (autoc != hw->mac.orig_autoc)
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
			                IXGBE_AUTOC_AN_RESTART));

		/* Only the upper (PMA/PMD select) bits of AUTOC2 are restored */
		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
			autoc2 |= (hw->mac.orig_autoc2 &
			           IXGBE_AUTOC2_UPPER_MASK);
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		}
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	/* NOTE(review): SAN MAC / WWN prefix handling is disabled in this port */
#if 0
	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
		                    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
	                           &hw->mac.wwpn_prefix);
#endif
reset_hw_out:
	return status;
}

/**
 *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
 *  @hw: pointer to hardware structure
 **/
int32_t ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
	int i;
	uint32_t fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;

	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");

	/*
	 * Before starting reinitialization process,
	 * FDIRCMD.CMD must be zero.
	 */
	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
		if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
		      IXGBE_FDIRCMD_CMD_MASK))
			break;
		usec_delay(10);
	}
	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
		DEBUGOUT("Flow Director previous command isn't complete, "
		         "aborting table re-initialization. \n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * 82599 adapters flow director init flow cannot be restarted,
	 * Workaround 82599 silicon errata by performing the following steps
	 * before re-writing the FDIRCTRL control register with the same value.
1154 * - write 1 to bit 8 of FDIRCMD register & 1155 * - write 0 to bit 8 of FDIRCMD register 1156 */ 1157 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, 1158 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | 1159 IXGBE_FDIRCMD_CLEARHT)); 1160 IXGBE_WRITE_FLUSH(hw); 1161 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, 1162 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & 1163 ~IXGBE_FDIRCMD_CLEARHT)); 1164 IXGBE_WRITE_FLUSH(hw); 1165 /* 1166 * Clear FDIR Hash register to clear any leftover hashes 1167 * waiting to be programmed. 1168 */ 1169 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00); 1170 IXGBE_WRITE_FLUSH(hw); 1171 1172 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); 1173 IXGBE_WRITE_FLUSH(hw); 1174 1175 /* Poll init-done after we write FDIRCTRL register */ 1176 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { 1177 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1178 IXGBE_FDIRCTRL_INIT_DONE) 1179 break; 1180 usec_delay(10); 1181 } 1182 if (i >= IXGBE_FDIR_INIT_DONE_POLL) { 1183 DEBUGOUT("Flow Director Signature poll time exceeded!\n"); 1184 return IXGBE_ERR_FDIR_REINIT_FAILED; 1185 } 1186 1187 /* Clear FDIR statistics registers (read to clear) */ 1188 IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT); 1189 IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT); 1190 IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 1191 IXGBE_READ_REG(hw, IXGBE_FDIRMISS); 1192 IXGBE_READ_REG(hw, IXGBE_FDIRLEN); 1193 1194 return IXGBE_SUCCESS; 1195 } 1196 1197 /** 1198 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters 1199 * @hw: pointer to hardware structure 1200 * @pballoc: which mode to allocate filters with 1201 **/ 1202 int32_t ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, uint32_t pballoc) 1203 { 1204 uint32_t fdirctrl = 0; 1205 uint32_t pbsize; 1206 int i; 1207 1208 DEBUGFUNC("ixgbe_init_fdir_signature_82599"); 1209 1210 /* 1211 * Before enabling Flow Director, the Rx Packet Buffer size 1212 * must be reduced. The new value is the current size minus 1213 * flow director memory usage size. 
1214 */ 1215 pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc)); 1216 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 1217 (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize)); 1218 1219 /* 1220 * The defaults in the HW for RX PB 1-7 are not zero and so should be 1221 * intialized to zero for non DCB mode otherwise actual total RX PB 1222 * would be bigger than programmed and filter space would run into 1223 * the PB 0 region. 1224 */ 1225 for (i = 1; i < 8; i++) 1226 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); 1227 1228 /* Send interrupt when 64 filters are left */ 1229 fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT; 1230 1231 /* Set the maximum length per hash bucket to 0xA filters */ 1232 fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT; 1233 1234 switch (pballoc) { 1235 case IXGBE_FDIR_PBALLOC_64K: 1236 /* 8k - 1 signature filters */ 1237 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K; 1238 break; 1239 case IXGBE_FDIR_PBALLOC_128K: 1240 /* 16k - 1 signature filters */ 1241 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K; 1242 break; 1243 case IXGBE_FDIR_PBALLOC_256K: 1244 /* 32k - 1 signature filters */ 1245 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K; 1246 break; 1247 default: 1248 /* bad value */ 1249 return IXGBE_ERR_CONFIG; 1250 }; 1251 1252 /* Move the flexible bytes to use the ethertype - shift 6 words */ 1253 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); 1254 1255 1256 /* Prime the keys for hashing */ 1257 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); 1258 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); 1259 1260 /* 1261 * Poll init-done after we write the register. Estimated times: 1262 * 10G: PBALLOC = 11b, timing is 60us 1263 * 1G: PBALLOC = 11b, timing is 600us 1264 * 100M: PBALLOC = 11b, timing is 6ms 1265 * 1266 * Multiple these timings by 4 if under full Rx load 1267 * 1268 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for 1269 * 1 msec per poll time. 
If we're at line rate and drop to 100M, then 1270 * this might not finish in our poll time, but we can live with that 1271 * for now. 1272 */ 1273 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); 1274 IXGBE_WRITE_FLUSH(hw); 1275 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { 1276 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1277 IXGBE_FDIRCTRL_INIT_DONE) 1278 break; 1279 msec_delay(1); 1280 } 1281 if (i >= IXGBE_FDIR_INIT_DONE_POLL) 1282 DEBUGOUT("Flow Director Signature poll time exceeded!\n"); 1283 1284 return IXGBE_SUCCESS; 1285 } 1286 1287 /** 1288 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters 1289 * @hw: pointer to hardware structure 1290 * @pballoc: which mode to allocate filters with 1291 **/ 1292 int32_t ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, uint32_t pballoc) 1293 { 1294 uint32_t fdirctrl = 0; 1295 uint32_t pbsize; 1296 int i; 1297 1298 DEBUGFUNC("ixgbe_init_fdir_perfect_82599"); 1299 1300 /* 1301 * Before enabling Flow Director, the Rx Packet Buffer size 1302 * must be reduced. The new value is the current size minus 1303 * flow director memory usage size. 1304 */ 1305 pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc)); 1306 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 1307 (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize)); 1308 1309 /* 1310 * The defaults in the HW for RX PB 1-7 are not zero and so should be 1311 * intialized to zero for non DCB mode otherwise actual total RX PB 1312 * would be bigger than programmed and filter space would run into 1313 * the PB 0 region. 
1314 */ 1315 for (i = 1; i < 8; i++) 1316 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); 1317 1318 /* Send interrupt when 64 filters are left */ 1319 fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT; 1320 1321 /* Initialize the drop queue to Rx queue 127 */ 1322 fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT); 1323 1324 switch (pballoc) { 1325 case IXGBE_FDIR_PBALLOC_64K: 1326 /* 2k - 1 perfect filters */ 1327 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K; 1328 break; 1329 case IXGBE_FDIR_PBALLOC_128K: 1330 /* 4k - 1 perfect filters */ 1331 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K; 1332 break; 1333 case IXGBE_FDIR_PBALLOC_256K: 1334 /* 8k - 1 perfect filters */ 1335 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K; 1336 break; 1337 default: 1338 /* bad value */ 1339 return IXGBE_ERR_CONFIG; 1340 }; 1341 1342 /* Turn perfect match filtering on */ 1343 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH; 1344 fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS; 1345 1346 /* Move the flexible bytes to use the ethertype - shift 6 words */ 1347 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); 1348 1349 /* Prime the keys for hashing */ 1350 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); 1351 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); 1352 1353 /* 1354 * Poll init-done after we write the register. Estimated times: 1355 * 10G: PBALLOC = 11b, timing is 60us 1356 * 1G: PBALLOC = 11b, timing is 600us 1357 * 100M: PBALLOC = 11b, timing is 6ms 1358 * 1359 * Multiple these timings by 4 if under full Rx load 1360 * 1361 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for 1362 * 1 msec per poll time. If we're at line rate and drop to 100M, then 1363 * this might not finish in our poll time, but we can live with that 1364 * for now. 
1365 */ 1366 1367 /* Set the maximum length per hash bucket to 0xA filters */ 1368 fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT); 1369 1370 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); 1371 IXGBE_WRITE_FLUSH(hw); 1372 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { 1373 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1374 IXGBE_FDIRCTRL_INIT_DONE) 1375 break; 1376 msec_delay(1); 1377 } 1378 if (i >= IXGBE_FDIR_INIT_DONE_POLL) 1379 DEBUGOUT("Flow Director Perfect poll time exceeded!\n"); 1380 1381 return IXGBE_SUCCESS; 1382 } 1383 1384 /** 1385 * ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR 1386 * @stream: input bitstream to compute the hash on 1387 * @key: 32-bit hash key 1388 **/ 1389 uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input, 1390 uint32_t key) 1391 { 1392 /* 1393 * The algorithm is as follows: 1394 * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350 1395 * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n] 1396 * and A[n] x B[n] is bitwise AND between same length strings 1397 * 1398 * K[n] is 16 bits, defined as: 1399 * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15] 1400 * for n modulo 32 < 15, K[n] = 1401 * K[(n % 32:0) | (31:31 - (14 - (n % 32)))] 1402 * 1403 * S[n] is 16 bits, defined as: 1404 * for n >= 15, S[n] = S[n:n - 15] 1405 * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))] 1406 * 1407 * To simplify for programming, the algorithm is implemented 1408 * in software this way: 1409 * 1410 * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0] 1411 * 1412 * for (i = 0; i < 352; i+=32) 1413 * hi_hash_dword[31:0] ^= Stream[(i+31):i]; 1414 * 1415 * lo_hash_dword[15:0] ^= Stream[15:0]; 1416 * lo_hash_dword[15:0] ^= hi_hash_dword[31:16]; 1417 * lo_hash_dword[31:16] ^= hi_hash_dword[15:0]; 1418 * 1419 * hi_hash_dword[31:0] ^= Stream[351:320]; 1420 * 1421 * if(key[0]) 1422 * hash[15:0] ^= Stream[15:0]; 1423 * 1424 * for (i = 0; i < 16; i++) { 1425 * if (key[i]) 1426 * hash[15:0] ^= 
lo_hash_dword[(i+15):i]; 1427 * if (key[i + 16]) 1428 * hash[15:0] ^= hi_hash_dword[(i+15):i]; 1429 * } 1430 * 1431 */ 1432 __be32 common_hash_dword = 0; 1433 uint32_t hi_hash_dword, lo_hash_dword, flow_vm_vlan; 1434 uint32_t hash_result = 0; 1435 uint8_t i; 1436 1437 /* record the flow_vm_vlan bits as they are a key part to the hash */ 1438 flow_vm_vlan = ntohl(atr_input->dword_stream[0]); 1439 1440 /* generate common hash dword */ 1441 for (i = 10; i; i -= 2) 1442 common_hash_dword ^= atr_input->dword_stream[i] ^ 1443 atr_input->dword_stream[i - 1]; 1444 1445 hi_hash_dword = ntohl(common_hash_dword); 1446 1447 /* low dword is word swapped version of common */ 1448 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); 1449 1450 /* apply flow ID/VM pool/VLAN ID bits to hash words */ 1451 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); 1452 1453 /* Process bits 0 and 16 */ 1454 if (key & 0x0001) hash_result ^= lo_hash_dword; 1455 if (key & 0x00010000) hash_result ^= hi_hash_dword; 1456 1457 /* 1458 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to 1459 * delay this because bit 0 of the stream should not be processed 1460 * so we do not add the vlan until after bit 0 was processed 1461 */ 1462 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); 1463 1464 1465 /* process the remaining 30 bits in the key 2 bits at a time */ 1466 for (i = 15; i; i-- ) { 1467 if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i; 1468 if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i; 1469 } 1470 1471 return hash_result & IXGBE_ATR_HASH_MASK; 1472 } 1473 1474 /* 1475 * These defines allow us to quickly generate all of the necessary instructions 1476 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION 1477 * for values 0 through 15 1478 */ 1479 #define IXGBE_ATR_COMMON_HASH_KEY \ 1480 (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY) 1481 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ 1482 do { \ 1483 
uint32_t n = (_n); \ 1484 if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ 1485 common_hash ^= lo_hash_dword >> n; \ 1486 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ 1487 bucket_hash ^= lo_hash_dword >> n; \ 1488 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ 1489 sig_hash ^= lo_hash_dword << (16 - n); \ 1490 if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ 1491 common_hash ^= hi_hash_dword >> n; \ 1492 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ 1493 bucket_hash ^= hi_hash_dword >> n; \ 1494 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ 1495 sig_hash ^= hi_hash_dword << (16 - n); \ 1496 } while (0); 1497 1498 /** 1499 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash 1500 * @stream: input bitstream to compute the hash on 1501 * 1502 * This function is almost identical to the function above but contains 1503 * several optomizations such as unwinding all of the loops, letting the 1504 * compiler work out all of the conditional ifs since the keys are static 1505 * defines, and computing two keys at once since the hashed dword stream 1506 * will be the same for both keys. 
 **/
uint32_t ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
                                          union ixgbe_atr_hash_dword common)
{
	uint32_t hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	uint32_t sig_hash = 0, bucket_hash = 0, common_hash = 0;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = ntohl(input.dword);

	/* generate common hash dword */
	hi_hash_dword = ntohl(common.dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the vlan until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process remaining 30 bit of the key */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);

	/* combine common_hash result with signature and bucket hashes */
	bucket_hash ^= common_hash;
	bucket_hash &= IXGBE_ATR_HASH_MASK;

	/* signature hash occupies the upper 16 bits of the result */
	sig_hash ^= common_hash << 16;
	sig_hash &= IXGBE_ATR_HASH_MASK << 16;

	/* return completed signature hash */
	return sig_hash ^ bucket_hash;
}

/**
 *  ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
 *  @hw: pointer to hardware structure
 *  @input: unique input dword
 *  @common: compressed common input dword
 *  @queue: queue index to direct traffic to
 **/
int32_t ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
                                              union ixgbe_atr_hash_dword input,
                                              union ixgbe_atr_hash_dword common,
                                              uint8_t queue)
{
	uint64_t fdirhashcmd;
	uint64_t fdircmd;

	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");

	/*
	 * Get the flow_type in order to program FDIRCMD properly
	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
	 */
	switch (input.formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
	case IXGBE_ATR_FLOW_TYPE_TCPV6:
	case IXGBE_ATR_FLOW_TYPE_UDPV6:
	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
		break;
	default:
		DEBUGOUT(" Error on flow type input\n");
		return IXGBE_ERR_CONFIG;
	}

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
	          IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;

	/*
	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
	 */
	fdirhashcmd = (uint64_t)fdircmd << 32;
	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);

	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (uint32_t)fdirhashcmd);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
 *  @input_masks: mask to be bit swapped
 *
 *  The source and destination port masks for flow director are bit swapped
 *  in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc.  In order to
 *  generate a correctly swapped value we need to bit swap the mask and that
 *  is what is accomplished by this function.
 **/
uint32_t ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
{
	/* pack dst port mask in [31:16], src port mask in [15:0] */
	uint32_t mask = ntohs(input_masks->dst_port_mask);
	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
	mask |= ntohs(input_masks->src_port_mask);
	/* classic bit-reversal: swap adjacent bits, pairs, nibbles, bytes */
	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
}

/*
 * These two macros are meant to address the fact that we have registers
 * that are either all or in part big-endian.  As a result on big-endian
 * systems we will end up byte swapping the value to little-endian before
 * it is byte swapped again and written to the hardware in the original
 * big-endian format.
 */
#define IXGBE_STORE_AS_BE32(_value) \
	(((uint32_t)(_value) >> 24) | (((uint32_t)(_value) & 0x00FF0000) >> 8) | \
	 (((uint32_t)(_value) & 0x0000FF00) << 8) | ((uint32_t)(_value) << 24))

#define IXGBE_WRITE_REG_BE32(a, reg, value) \
	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))

#define IXGBE_STORE_AS_BE16(_value) \
	(((uint16_t)(_value) >> 8) | ((uint16_t)(_value) << 8))


/**
 *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
 *  @hw: pointer to hardware structure
 *  @input: input bitstream
 *  @input_masks: masks for the input bitstream
 *  @soft_id: software index for the filters
 *  @queue: queue index to direct traffic to
 *
 *  Note that the caller to this function must lock before calling, since the
 *  hardware writes must be protected from one another.
 **/
int32_t ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
                                            union ixgbe_atr_input *input,
                                            struct ixgbe_atr_input_masks *input_masks,
                                            uint16_t soft_id, uint8_t queue)
{
	uint32_t fdirhash;
	uint32_t fdircmd;
	uint32_t fdirport, fdirtcpm;
	uint32_t fdirvlan;
	/* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */
	uint32_t fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX |
	    IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;

	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");

	/*
	 * Check flow_type formatting, and bail out before we touch the hardware
	 * if there's a configuration issue
	 */
	switch (input->formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_IPV4:
		/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
		fdirm |= IXGBE_FDIRM_L4P;
		/* FALLTHROUGH */
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
		if (input_masks->dst_port_mask || input_masks->src_port_mask) {
			DEBUGOUT(" Error on src/dst port mask\n");
			return IXGBE_ERR_CONFIG;
		}
		/* FALLTHROUGH */
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
		break;
	default:
		DEBUGOUT(" Error on flow type input\n");
		return IXGBE_ERR_CONFIG;
	}

	/*
	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
	 * are zero, then assume a full mask for that field.  Also assume that
	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
	 * cannot be masked out in this implementation.
	 *
	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
	 * point in time.
	 */

	/* Program FDIRM */
	switch (ntohs(input_masks->vlan_id_mask) & 0xEFFF) {
	case 0xEFFF:
		/* Unmask VLAN ID - bit 0 and fall through to unmask prio */
		fdirm &= ~IXGBE_FDIRM_VLANID;
		/* FALLTHROUGH */
	case 0xE000:
		/* Unmask VLAN prio - bit 1 */
		fdirm &= ~IXGBE_FDIRM_VLANP;
		break;
	case 0x0FFF:
		/* Unmask VLAN ID - bit 0 */
		fdirm &= ~IXGBE_FDIRM_VLANID;
		break;
	case 0x0000:
		/* do nothing, vlans already masked */
		break;
	default:
		DEBUGOUT(" Error on VLAN mask\n");
		return IXGBE_ERR_CONFIG;
	}

	if (input_masks->flex_mask & 0xFFFF) {
		/* only an all-ones flex mask is supported */
		if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) {
			DEBUGOUT(" Error on flexible byte mask\n");
			return IXGBE_ERR_CONFIG;
		}
		/* Unmask Flex Bytes - bit 4 */
		fdirm &= ~IXGBE_FDIRM_FLEX;
	}

	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	/* store the TCP/UDP port masks, bit reversed from port layout */
	fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks);

	/* write both the same so that UDP and TCP use the same mask */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);

	/* store source and destination IP masks (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
	                     ~input_masks->src_ip_mask[0]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
	                     ~input_masks->dst_ip_mask[0]);

	/* Apply masks to input data */
	input->formatted.vlan_id &= input_masks->vlan_id_mask;
	input->formatted.flex_bytes &= input_masks->flex_mask;
	input->formatted.src_port &= input_masks->src_port_mask;
	input->formatted.dst_port &= input_masks->dst_port_mask;
	input->formatted.src_ip[0] &= input_masks->src_ip_mask[0];
	input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0];

	/* record vlan (little-endian) and flex_bytes(big-endian) */
	fdirvlan =
	    IXGBE_STORE_AS_BE16(ntohs(input->formatted.flex_bytes));
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= ntohs(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	/* record source and destination port (little-endian)*/
	fdirport = ntohs(input->formatted.dst_port);
	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
	fdirport |= ntohs(input->formatted.src_port);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);

	/* record the first 32 bits of the destination address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);

	/* record the source address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
	          IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;

	/* we only want the bucket hash so drop the upper 16 bits */
	fdirhash = ixgbe_atr_compute_hash_82599(input,
	                                        IXGBE_ATR_BUCKET_HASH_KEY);
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

	return IXGBE_SUCCESS;
}

/**
 *
ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register 1802 * @hw: pointer to hardware structure 1803 * @reg: analog register to read 1804 * @val: read value 1805 * 1806 * Performs read operation to Omer analog register specified. 1807 **/ 1808 int32_t ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, uint32_t reg, uint8_t *val) 1809 { 1810 uint32_t core_ctl; 1811 1812 DEBUGFUNC("ixgbe_read_analog_reg8_82599"); 1813 1814 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD | 1815 (reg << 8)); 1816 IXGBE_WRITE_FLUSH(hw); 1817 usec_delay(10); 1818 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL); 1819 *val = (uint8_t)core_ctl; 1820 1821 return IXGBE_SUCCESS; 1822 } 1823 1824 /** 1825 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register 1826 * @hw: pointer to hardware structure 1827 * @reg: atlas register to write 1828 * @val: value to write 1829 * 1830 * Performs write operation to Omer analog register specified. 1831 **/ 1832 int32_t ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, uint32_t reg, uint8_t val) 1833 { 1834 uint32_t core_ctl; 1835 1836 DEBUGFUNC("ixgbe_write_analog_reg8_82599"); 1837 1838 core_ctl = (reg << 8) | val; 1839 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl); 1840 IXGBE_WRITE_FLUSH(hw); 1841 usec_delay(10); 1842 1843 return IXGBE_SUCCESS; 1844 } 1845 1846 /** 1847 * ixgbe_start_hw_rev_1_82599 - Prepare hardware for Tx/Rx 1848 * @hw: pointer to hardware structure 1849 * 1850 * Starts the hardware using the generic start_hw function 1851 * and the generation start_hw function. 1852 * Then performs revision-specific operations, if any. 
1853 **/ 1854 int32_t ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw) 1855 { 1856 int32_t ret_val = IXGBE_SUCCESS; 1857 uint32_t gcr = IXGBE_READ_REG(hw, IXGBE_GCR); 1858 1859 DEBUGFUNC("ixgbe_start_hw_rev_1__82599"); 1860 1861 ret_val = ixgbe_start_hw_generic(hw); 1862 if (ret_val != IXGBE_SUCCESS) 1863 goto out; 1864 1865 ret_val = ixgbe_start_hw_gen2(hw); 1866 if (ret_val != IXGBE_SUCCESS) 1867 goto out; 1868 1869 /* We need to run link autotry after the driver loads */ 1870 hw->mac.autotry_restart = TRUE; 1871 1872 /* 1873 * From the 82599 specification update: 1874 * set the completion timeout value for 16ms to 55ms if needed 1875 */ 1876 if (gcr & IXGBE_GCR_CAP_VER2) { 1877 uint16_t reg; 1878 reg = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2); 1879 if ((reg & 0x0f) == 0) { 1880 reg |= IXGBE_PCI_DEVICE_CONTROL2_16ms; 1881 IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, 1882 reg); 1883 } 1884 } 1885 1886 if (ret_val == IXGBE_SUCCESS) 1887 ret_val = ixgbe_verify_fw_version_82599(hw); 1888 out: 1889 return ret_val; 1890 } 1891 1892 /** 1893 * ixgbe_identify_phy_82599 - Get physical layer module 1894 * @hw: pointer to hardware structure 1895 * 1896 * Determines the physical layer module found on the current adapter. 1897 * If PHY already detected, maintains current PHY type in hw struct, 1898 * otherwise executes the PHY detection routine. 1899 **/ 1900 int32_t ixgbe_identify_phy_82599(struct ixgbe_hw *hw) 1901 { 1902 int32_t status = IXGBE_ERR_PHY_ADDR_INVALID; 1903 1904 DEBUGFUNC("ixgbe_identify_phy_82599"); 1905 1906 /* Detect PHY if not unknown - returns success if already detected. 
*/ 1907 status = ixgbe_identify_phy_generic(hw); 1908 if (status != IXGBE_SUCCESS) { 1909 /* 82599 10GBASE-T requires an external PHY */ 1910 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) 1911 goto out; 1912 else 1913 status = ixgbe_identify_sfp_module_generic(hw); 1914 } 1915 1916 /* Set PHY type none if no PHY detected */ 1917 if (hw->phy.type == ixgbe_phy_unknown) { 1918 hw->phy.type = ixgbe_phy_none; 1919 status = IXGBE_SUCCESS; 1920 } 1921 1922 /* Return error if SFP module has been detected but is not supported */ 1923 if (hw->phy.type == ixgbe_phy_sfp_unsupported) 1924 status = IXGBE_ERR_SFP_NOT_SUPPORTED; 1925 1926 out: 1927 return status; 1928 } 1929 1930 /** 1931 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type 1932 * @hw: pointer to hardware structure 1933 * 1934 * Determines physical layer capabilities of the current configuration. 1935 **/ 1936 uint32_t ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) 1937 { 1938 uint32_t physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; 1939 uint32_t autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 1940 uint32_t autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 1941 uint32_t pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; 1942 uint32_t pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; 1943 uint32_t pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; 1944 uint16_t ext_ability = 0; 1945 uint8_t comp_codes_10g = 0; 1946 uint8_t comp_codes_1g = 0; 1947 1948 DEBUGFUNC("ixgbe_get_support_physical_layer_82599"); 1949 1950 hw->phy.ops.identify(hw); 1951 1952 switch (hw->phy.type) { 1953 case ixgbe_phy_tn: 1954 case ixgbe_phy_aq: 1955 case ixgbe_phy_cu_unknown: 1956 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, 1957 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); 1958 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) 1959 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; 1960 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) 1961 physical_layer |= 
IXGBE_PHYSICAL_LAYER_1000BASE_T; 1962 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) 1963 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; 1964 goto out; 1965 default: 1966 break; 1967 } 1968 1969 switch (autoc & IXGBE_AUTOC_LMS_MASK) { 1970 case IXGBE_AUTOC_LMS_1G_AN: 1971 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: 1972 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) { 1973 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX | 1974 IXGBE_PHYSICAL_LAYER_1000BASE_BX; 1975 goto out; 1976 } else 1977 /* SFI mode so read SFP module */ 1978 goto sfp_check; 1979 break; 1980 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: 1981 if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4) 1982 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; 1983 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4) 1984 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; 1985 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI) 1986 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI; 1987 goto out; 1988 break; 1989 case IXGBE_AUTOC_LMS_10G_SERIAL: 1990 if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) { 1991 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR; 1992 goto out; 1993 } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) 1994 goto sfp_check; 1995 break; 1996 case IXGBE_AUTOC_LMS_KX4_KX_KR: 1997 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: 1998 if (autoc & IXGBE_AUTOC_KX_SUPP) 1999 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; 2000 if (autoc & IXGBE_AUTOC_KX4_SUPP) 2001 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; 2002 if (autoc & IXGBE_AUTOC_KR_SUPP) 2003 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR; 2004 goto out; 2005 break; 2006 default: 2007 goto out; 2008 break; 2009 } 2010 2011 sfp_check: 2012 /* SFP check must be done last since DA modules are sometimes used to 2013 * test KR mode - we need to id KR mode correctly before SFP module. 
2014 * Call identify_sfp because the pluggable module may have changed */ 2015 hw->phy.ops.identify_sfp(hw); 2016 if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) 2017 goto out; 2018 2019 switch (hw->phy.type) { 2020 case ixgbe_phy_sfp_passive_tyco: 2021 case ixgbe_phy_sfp_passive_unknown: 2022 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; 2023 break; 2024 case ixgbe_phy_sfp_ftl_active: 2025 case ixgbe_phy_sfp_active_unknown: 2026 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA; 2027 break; 2028 case ixgbe_phy_sfp_avago: 2029 case ixgbe_phy_sfp_ftl: 2030 case ixgbe_phy_sfp_intel: 2031 case ixgbe_phy_sfp_unknown: 2032 hw->phy.ops.read_i2c_eeprom(hw, 2033 IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g); 2034 hw->phy.ops.read_i2c_eeprom(hw, 2035 IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); 2036 if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) 2037 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; 2038 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) 2039 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; 2040 else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) 2041 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T; 2042 break; 2043 default: 2044 break; 2045 } 2046 2047 out: 2048 return physical_layer; 2049 } 2050 2051 /** 2052 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599 2053 * @hw: pointer to hardware structure 2054 * @regval: register value to write to RXCTRL 2055 * 2056 * Enables the Rx DMA unit for 82599 2057 **/ 2058 int32_t ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, uint32_t regval) 2059 { 2060 #define IXGBE_MAX_SECRX_POLL 30 2061 int i; 2062 int secrxreg; 2063 2064 DEBUGFUNC("ixgbe_enable_rx_dma_82599"); 2065 2066 /* 2067 * Workaround for 82599 silicon errata when enabling the Rx datapath. 2068 * If traffic is incoming before we enable the Rx unit, it could hang 2069 * the Rx DMA unit. Therefore, make sure the security engine is 2070 * completely disabled prior to enabling the Rx unit. 
2071 */ 2072 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 2073 secrxreg |= IXGBE_SECRXCTRL_RX_DIS; 2074 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); 2075 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) { 2076 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); 2077 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) 2078 break; 2079 else 2080 /* Use interrupt-safe sleep just in case */ 2081 usec_delay(10); 2082 } 2083 2084 /* For informational purposes only */ 2085 if (i >= IXGBE_MAX_SECRX_POLL) 2086 DEBUGOUT("Rx unit being enabled before security " 2087 "path fully disabled. Continuing with init.\n"); 2088 2089 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); 2090 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 2091 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; 2092 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); 2093 IXGBE_WRITE_FLUSH(hw); 2094 2095 return IXGBE_SUCCESS; 2096 } 2097 2098 /** 2099 * ixgbe_verify_fw_version_82599 - verify fw version for 82599 2100 * @hw: pointer to hardware structure 2101 * 2102 * Verifies that installed the firmware version is 0.6 or higher 2103 * for SFI devices. All 82599 SFI devices should have version 0.6 or higher. 2104 * 2105 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or 2106 * if the FW version is not supported. 
2107 **/ 2108 int32_t ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) 2109 { 2110 int32_t status = IXGBE_ERR_EEPROM_VERSION; 2111 uint16_t fw_offset, fw_ptp_cfg_offset; 2112 uint16_t fw_version = 0; 2113 2114 DEBUGFUNC("ixgbe_verify_fw_version_82599"); 2115 2116 /* firmware check is only necessary for SFI devices */ 2117 if (hw->phy.media_type != ixgbe_media_type_fiber) { 2118 status = IXGBE_SUCCESS; 2119 goto fw_version_out; 2120 } 2121 2122 /* get the offset to the Firmware Module block */ 2123 hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); 2124 2125 if ((fw_offset == 0) || (fw_offset == 0xFFFF)) 2126 goto fw_version_out; 2127 2128 /* get the offset to the Pass Through Patch Configuration block */ 2129 hw->eeprom.ops.read(hw, (fw_offset + 2130 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR), 2131 &fw_ptp_cfg_offset); 2132 2133 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF)) 2134 goto fw_version_out; 2135 2136 /* get the firmware version */ 2137 hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset + 2138 IXGBE_FW_PATCH_VERSION_4), 2139 &fw_version); 2140 2141 if (fw_version > 0x5) 2142 status = IXGBE_SUCCESS; 2143 2144 fw_version_out: 2145 return status; 2146 } 2147 2148 /** 2149 * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state. 2150 * @hw: pointer to hardware structure 2151 * 2152 * Returns TRUE if the LESM FW module is present and enabled. Otherwise 2153 * returns FALSE. Smart Speed must be disabled if LESM FW module is enabled. 
2154 **/ 2155 int ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) 2156 { 2157 int lesm_enabled = FALSE; 2158 uint16_t fw_offset, fw_lesm_param_offset, fw_lesm_state; 2159 int32_t status; 2160 2161 DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599"); 2162 2163 /* get the offset to the Firmware Module block */ 2164 status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); 2165 2166 if ((status != IXGBE_SUCCESS) || 2167 (fw_offset == 0) || (fw_offset == 0xFFFF)) 2168 goto out; 2169 2170 /* get the offset to the LESM Parameters block */ 2171 status = hw->eeprom.ops.read(hw, (fw_offset + 2172 IXGBE_FW_LESM_PARAMETERS_PTR), 2173 &fw_lesm_param_offset); 2174 2175 if ((status != IXGBE_SUCCESS) || 2176 (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF)) 2177 goto out; 2178 2179 /* get the lesm state word */ 2180 status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset + 2181 IXGBE_FW_LESM_STATE_1), 2182 &fw_lesm_state); 2183 2184 if ((status == IXGBE_SUCCESS) && 2185 (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED)) 2186 lesm_enabled = TRUE; 2187 2188 out: 2189 return lesm_enabled; 2190 } 2191 2192 2193