xref: /dragonfly/sys/dev/netif/ix/ixgbe_82599.c (revision d4ef6694)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2013, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "ixgbe_type.h"
36 #include "ixgbe_82599.h"
37 #include "ixgbe_api.h"
38 #include "ixgbe_common.h"
39 #include "ixgbe_phy.h"
40 
41 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
42 					 ixgbe_link_speed speed,
43 					 bool autoneg_wait_to_complete);
44 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
45 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
46 				   u16 offset, u16 *data);
47 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
48 					  u16 words, u16 *data);
49 
50 static bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
51 {
52 	u32 fwsm, manc, factps;
53 
54 	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
55 	if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
56 		return FALSE;
57 
58 	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
59 	if (!(manc & IXGBE_MANC_RCV_TCO_EN))
60 		return FALSE;
61 
62 	factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
63 	if (factps & IXGBE_FACTPS_MNGCG)
64 		return FALSE;
65 
66 	return TRUE;
67 }
68 
69 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
70 {
71 	struct ixgbe_mac_info *mac = &hw->mac;
72 
73 	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
74 
75 	/*
76 	 * enable the laser control functions for SFP+ fiber
77 	 * and MNG not enabled
78 	 */
79 	if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
80 	    !hw->mng_fw_enabled) {
81 		mac->ops.disable_tx_laser =
82 				       &ixgbe_disable_tx_laser_multispeed_fiber;
83 		mac->ops.enable_tx_laser =
84 					&ixgbe_enable_tx_laser_multispeed_fiber;
85 		mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
86 
87 	} else {
88 		mac->ops.disable_tx_laser = NULL;
89 		mac->ops.enable_tx_laser = NULL;
90 		mac->ops.flap_tx_laser = NULL;
91 	}
92 
93 	if (hw->phy.multispeed_fiber) {
94 		/* Set up dual speed SFP+ support */
95 		mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
96 	} else {
97 		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
98 		     (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
99 		      hw->phy.smart_speed == ixgbe_smart_speed_on) &&
100 		      !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
101 			mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
102 		} else {
103 			mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
104 		}
105 	}
106 }
107 
108 /**
109  *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init
110  *  @hw: pointer to hardware structure
111  *
112  *  Initialize any function pointers that were not able to be
113  *  set during init_shared_code because the PHY/SFP type was
114  *  not known.  Perform the SFP init if necessary.
115  *
116  **/
117 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
118 {
119 	struct ixgbe_mac_info *mac = &hw->mac;
120 	struct ixgbe_phy_info *phy = &hw->phy;
121 	s32 ret_val = IXGBE_SUCCESS;
122 
123 	DEBUGFUNC("ixgbe_init_phy_ops_82599");
124 
125 	/* Identify the PHY or SFP module */
126 	ret_val = phy->ops.identify(hw);
127 	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
128 		goto init_phy_ops_out;
129 
130 	/* Setup function pointers based on detected SFP module and speeds */
131 	ixgbe_init_mac_link_ops_82599(hw);
132 	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
133 		hw->phy.ops.reset = NULL;
134 
135 	/* If copper media, overwrite with copper function pointers */
136 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
137 		mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
138 		mac->ops.get_link_capabilities =
139 				  &ixgbe_get_copper_link_capabilities_generic;
140 	}
141 
142 	/* Set necessary function pointers based on phy type */
143 	switch (hw->phy.type) {
144 	case ixgbe_phy_tn:
145 		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
146 		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
147 		phy->ops.get_firmware_version =
148 			     &ixgbe_get_phy_firmware_version_tnx;
149 		break;
150 	default:
151 		break;
152 	}
153 init_phy_ops_out:
154 	return ret_val;
155 }
156 
/**
 *  ixgbe_setup_sfp_modules_82599 - Setup SFP module
 *  @hw: pointer to hardware structure
 *
 *  Streams the PHY init sequence stored in the EEPROM into the CORECTL
 *  register, then restarts the DSP in SFI mode by writing AUTOC and
 *  resetting the pipeline.  Returns IXGBE_SUCCESS on success,
 *  IXGBE_ERR_SWFW_SYNC if the SW/FW semaphore cannot be acquired,
 *  IXGBE_ERR_PHY on an EEPROM read failure, or
 *  IXGBE_ERR_SFP_SETUP_NOT_COMPLETE if the pipeline reset fails.
 **/
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u16 list_offset, data_offset, data_value;
	bool got_lock = FALSE;

	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");

	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
		/* An SFP was detected: refresh the link function pointers */
		ixgbe_init_mac_link_ops_82599(hw);

		/* PHY reset is not needed once an SFP is identified */
		hw->phy.ops.reset = NULL;

		/* Locate this module's init sequence in the EEPROM */
		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
							      &data_offset);
		if (ret_val != IXGBE_SUCCESS)
			goto setup_sfp_out;

		/* PHY config will finish before releasing the semaphore */
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto setup_sfp_out;
		}

		/* Push init words into CORECTL until the 0xffff
		 * end-of-list sentinel is reached.
		 */
		if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
			goto setup_sfp_err;
		while (data_value != 0xffff) {
			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
			IXGBE_WRITE_FLUSH(hw);
			if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
				goto setup_sfp_err;
		}

		/* Release the semaphore */
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		/* Delay obtaining semaphore again to allow FW access */
		msec_delay(hw->eeprom.semaphore_delay);

		/* Need SW/FW semaphore around AUTOC writes if LESM on,
		 * likewise reset_pipeline requires lock as it also writes
		 * AUTOC.
		 */
		if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
			ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
			if (ret_val != IXGBE_SUCCESS) {
				ret_val = IXGBE_ERR_SWFW_SYNC;
				goto setup_sfp_out;
			}

			got_lock = TRUE;
		}

		/* Restart DSP and set SFI mode */
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((hw->mac.orig_autoc) |
				IXGBE_AUTOC_LMS_10G_SERIAL));
		hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
		ret_val = ixgbe_reset_pipeline_82599(hw);

		if (got_lock) {
			hw->mac.ops.release_swfw_sync(hw,
						      IXGBE_GSSR_MAC_CSR_SM);
			got_lock = FALSE;
		}

		if (ret_val) {
			DEBUGOUT("sfp module setup not complete\n");
			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
			goto setup_sfp_out;
		}

	}

setup_sfp_out:
	return ret_val;

setup_sfp_err:
	/* Release the semaphore */
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
	/* Delay obtaining semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", data_offset);
	return IXGBE_ERR_PHY;
}
244 
245 /**
246  *  ixgbe_init_ops_82599 - Inits func ptrs and MAC type
247  *  @hw: pointer to hardware structure
248  *
249  *  Initialize the function pointers and assign the MAC type for 82599.
250  *  Does not touch the hardware.
251  **/
252 
s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82599");

	/* Start from the generic tables, then override 82599 specifics */
	ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.identify = &ixgbe_identify_phy_82599;
	phy->ops.init = &ixgbe_init_phy_ops_82599;

	/* MAC */
	mac->ops.reset_hw = &ixgbe_reset_hw_82599;
	mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
	mac->ops.get_media_type = &ixgbe_get_media_type_82599;
	mac->ops.get_supported_physical_layer =
				    &ixgbe_get_supported_physical_layer_82599;
	mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
	mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
	mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
	mac->ops.start_hw = &ixgbe_start_hw_82599;
	mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
	mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
	mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
	mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
	mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
	mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
	mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
	mac->rar_highwater = 1;
	mac->ops.set_vfta = &ixgbe_set_vfta_generic;
	mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
	mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
	mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
	mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
	mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
	mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;

	/* Link */
	mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
	mac->ops.check_link = &ixgbe_check_mac_link_generic;
	mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
	ixgbe_init_mac_link_ops_82599(hw);

	/* 82599 hardware capacities (table sizes, queue counts) */
	mac->mcft_size		= 128;
	mac->vft_size		= 128;
	mac->num_rar_entries	= 128;
	mac->rx_pb_size		= 512;
	mac->max_tx_queues	= 128;
	mac->max_rx_queues	= 128;
	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);

	/* ARC subsystem is valid when FWSM reports any firmware mode */
	mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
				   IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE;

	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;

	/* EEPROM */
	eeprom->ops.read = &ixgbe_read_eeprom_82599;
	eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;


	mac->ops.get_rtrup2tc = &ixgbe_dcb_get_rtrup2tc_generic;

	/* Cache if MNG FW is up */
	hw->mng_fw_enabled = ixgbe_mng_enabled(hw);

	/* Return the status from ixgbe_init_ops_generic() above */
	return ret_val;
}
335 
336 /**
337  *  ixgbe_get_link_capabilities_82599 - Determines link capabilities
338  *  @hw: pointer to hardware structure
339  *  @speed: pointer to link speed
340  *  @autoneg: TRUE when autoneg or autotry is enabled
341  *
342  *  Determines the link capabilities by reading the AUTOC register.
343  **/
344 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
345 				      ixgbe_link_speed *speed,
346 				      bool *autoneg)
347 {
348 	s32 status = IXGBE_SUCCESS;
349 	u32 autoc = 0;
350 
351 	DEBUGFUNC("ixgbe_get_link_capabilities_82599");
352 
353 
354 	/* Check if 1G SFP module. */
355 	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
356 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
357 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
358 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
359 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
360 		*autoneg = TRUE;
361 		goto out;
362 	}
363 
364 	/*
365 	 * Determine link capabilities based on the stored value of AUTOC,
366 	 * which represents EEPROM defaults.  If AUTOC value has not
367 	 * been stored, use the current register values.
368 	 */
369 	if (hw->mac.orig_link_settings_stored)
370 		autoc = hw->mac.orig_autoc;
371 	else
372 		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
373 
374 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
375 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
376 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
377 		*autoneg = FALSE;
378 		break;
379 
380 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
381 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
382 		*autoneg = FALSE;
383 		break;
384 
385 	case IXGBE_AUTOC_LMS_1G_AN:
386 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
387 		*autoneg = TRUE;
388 		break;
389 
390 	case IXGBE_AUTOC_LMS_10G_SERIAL:
391 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
392 		*autoneg = FALSE;
393 		break;
394 
395 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
396 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
397 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
398 		if (autoc & IXGBE_AUTOC_KR_SUPP)
399 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
400 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
401 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
402 		if (autoc & IXGBE_AUTOC_KX_SUPP)
403 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
404 		*autoneg = TRUE;
405 		break;
406 
407 	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
408 		*speed = IXGBE_LINK_SPEED_100_FULL;
409 		if (autoc & IXGBE_AUTOC_KR_SUPP)
410 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
411 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
412 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
413 		if (autoc & IXGBE_AUTOC_KX_SUPP)
414 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
415 		*autoneg = TRUE;
416 		break;
417 
418 	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
419 		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
420 		*autoneg = FALSE;
421 		break;
422 
423 	default:
424 		status = IXGBE_ERR_LINK_SETUP;
425 		goto out;
426 		break;
427 	}
428 
429 	if (hw->phy.multispeed_fiber) {
430 		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
431 			  IXGBE_LINK_SPEED_1GB_FULL;
432 		*autoneg = TRUE;
433 	}
434 
435 out:
436 	return status;
437 }
438 
439 /**
440  *  ixgbe_get_media_type_82599 - Get media type
441  *  @hw: pointer to hardware structure
442  *
443  *  Returns the media type (fiber, copper, backplane)
444  **/
445 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
446 {
447 	enum ixgbe_media_type media_type;
448 
449 	DEBUGFUNC("ixgbe_get_media_type_82599");
450 
451 	/* Detect if there is a copper PHY attached. */
452 	switch (hw->phy.type) {
453 	case ixgbe_phy_cu_unknown:
454 	case ixgbe_phy_tn:
455 		media_type = ixgbe_media_type_copper;
456 		goto out;
457 	default:
458 		break;
459 	}
460 
461 	switch (hw->device_id) {
462 	case IXGBE_DEV_ID_82599_KX4:
463 	case IXGBE_DEV_ID_82599_KX4_MEZZ:
464 	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
465 	case IXGBE_DEV_ID_82599_KR:
466 	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
467 	case IXGBE_DEV_ID_82599_XAUI_LOM:
468 		/* Default device ID is mezzanine card KX/KX4 */
469 		media_type = ixgbe_media_type_backplane;
470 		break;
471 	case IXGBE_DEV_ID_82599_SFP:
472 	case IXGBE_DEV_ID_82599_SFP_FCOE:
473 	case IXGBE_DEV_ID_82599_SFP_EM:
474 	case IXGBE_DEV_ID_82599_SFP_SF2:
475 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
476 	case IXGBE_DEV_ID_82599EN_SFP:
477 		media_type = ixgbe_media_type_fiber;
478 		break;
479 	case IXGBE_DEV_ID_82599_CX4:
480 		media_type = ixgbe_media_type_cx4;
481 		break;
482 	case IXGBE_DEV_ID_82599_T3_LOM:
483 		media_type = ixgbe_media_type_copper;
484 		break;
485 	case IXGBE_DEV_ID_82599_BYPASS:
486 		media_type = ixgbe_media_type_fiber_fixed;
487 		hw->phy.multispeed_fiber = TRUE;
488 		break;
489 	default:
490 		media_type = ixgbe_media_type_unknown;
491 		break;
492 	}
493 out:
494 	return media_type;
495 }
496 
497 /**
498  *  ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
499  *  @hw: pointer to hardware structure
500  *
501  *  Disables link during D3 power down sequence.
502  *
503  **/
504 void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
505 {
506 	u32 autoc2_reg;
507 	u16 ee_ctrl_2 = 0;
508 
509 	DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
510 	ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
511 
512 	if (!hw->mng_fw_enabled && !hw->wol_enabled &&
513 		ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
514 		autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
515 		autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
516 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
517 	}
518 }
519 
520 /**
521  *  ixgbe_start_mac_link_82599 - Setup MAC link settings
522  *  @hw: pointer to hardware structure
523  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
524  *
525  *  Configures link settings based on values in the ixgbe_hw struct.
526  *  Restarts the link.  Performs autonegotiation if needed.
527  **/
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
			       bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;
	bool got_lock = FALSE;

	DEBUGFUNC("ixgbe_start_mac_link_82599");


	/*  reset_pipeline requires us to hold this lock as it writes to
	 *  AUTOC.
	 */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		status = hw->mac.ops.acquire_swfw_sync(hw,
						       IXGBE_GSSR_MAC_CSR_SM);
		if (status != IXGBE_SUCCESS)
			goto out;

		got_lock = TRUE;
	}

	/* Restart link */
	ixgbe_reset_pipeline_82599(hw);

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
		/* Autoneg completion only applies to KX4/KX/KR link modes */
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			/* Poll LINKS up to IXGBE_AUTO_NEG_TIME * 100ms */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autoneg did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noises during initial link setup */
	msec_delay(50);

out:
	return status;
}
587 
588 /**
589  *  ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
590  *  @hw: pointer to hardware structure
591  *
592  *  The base drivers may require better control over SFP+ module
593  *  PHY states.  This includes selectively shutting down the Tx
594  *  laser on the PHY, effectively halting physical link.
595  **/
596 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
597 {
598 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
599 
600 	/* Disable tx laser; allow 100us to go dark per spec */
601 	esdp_reg |= IXGBE_ESDP_SDP3;
602 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
603 	IXGBE_WRITE_FLUSH(hw);
604 	usec_delay(100);
605 }
606 
607 /**
608  *  ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
609  *  @hw: pointer to hardware structure
610  *
611  *  The base drivers may require better control over SFP+ module
612  *  PHY states.  This includes selectively turning on the Tx
613  *  laser on the PHY, effectively starting physical link.
614  **/
615 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
616 {
617 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
618 
619 	/* Enable tx laser; allow 100ms to light up */
620 	esdp_reg &= ~IXGBE_ESDP_SDP3;
621 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
622 	IXGBE_WRITE_FLUSH(hw);
623 	msec_delay(100);
624 }
625 
626 /**
627  *  ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
628  *  @hw: pointer to hardware structure
629  *
630  *  When the driver changes the link speeds that it can support,
631  *  it sets autotry_restart to TRUE to indicate that we need to
632  *  initiate a new autotry session with the link partner.  To do
633  *  so, we set the speed then disable and re-enable the tx laser, to
634  *  alert the link partner that it also needs to restart autotry on its
635  *  end.  This is consistent with TRUE clause 37 autoneg, which also
636  *  involves a loss of signal.
637  **/
638 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
639 {
640 	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
641 
642 	if (hw->mac.autotry_restart) {
643 		ixgbe_disable_tx_laser_multispeed_fiber(hw);
644 		ixgbe_enable_tx_laser_multispeed_fiber(hw);
645 		hw->mac.autotry_restart = FALSE;
646 	}
647 }
648 
649 /**
650  *  ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
651  *  @hw: pointer to hardware structure
652  *  @speed: link speed to set
653  *
654  *  We set the module speed differently for fixed fiber.  For other
655  *  multi-speed devices we don't have an error value so here if we
656  *  detect an error we just log it and exit.
657  */
658 static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
659 					ixgbe_link_speed speed)
660 {
661 	s32 status;
662 	u8 rs, eeprom_data;
663 
664 	switch (speed) {
665 	case IXGBE_LINK_SPEED_10GB_FULL:
666 		/* one bit mask same as setting on */
667 		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
668 		break;
669 	case IXGBE_LINK_SPEED_1GB_FULL:
670 		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
671 		break;
672 	default:
673 		DEBUGOUT("Invalid fixed module speed\n");
674 		return;
675 	}
676 
677 	/* Set RS0 */
678 	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
679 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
680 					   &eeprom_data);
681 	if (status) {
682 		DEBUGOUT("Failed to read Rx Rate Select RS0\n");
683 		goto out;
684 	}
685 
686 	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
687 
688 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
689 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
690 					    eeprom_data);
691 	if (status) {
692 		DEBUGOUT("Failed to write Rx Rate Select RS0\n");
693 		goto out;
694 	}
695 
696 	/* Set RS1 */
697 	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
698 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
699 					   &eeprom_data);
700 	if (status) {
701 		DEBUGOUT("Failed to read Rx Rate Select RS1\n");
702 		goto out;
703 	}
704 
705 	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
706 
707 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
708 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
709 					    eeprom_data);
710 	if (status) {
711 		DEBUGOUT("Failed to write Rx Rate Select RS1\n");
712 		goto out;
713 	}
714 out:
715 	return;
716 }
717 
718 /**
719  *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
720  *  @hw: pointer to hardware structure
721  *  @speed: new link speed
722  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
723  *
724  *  Set the link speed in the AUTOC register and restarts link.
725  **/
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
				     ixgbe_link_speed speed,
				     bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	u32 speedcnt = 0;
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
	u32 i = 0;
	bool autoneg, link_up = FALSE;

	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Mask off requested but non-supported speeds */
	status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
	if (status != IXGBE_SUCCESS)
		return status;

	speed &= link_speed;

	/*
	 * Try each speed one by one, highest priority first.  We do this in
	 * software because 10gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
			goto out;

		/* Set the module link speed: fixed fiber uses the SFF
		 * rate-select bytes, other modules use the SDP5 pin.
		 */
		if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) {
			ixgbe_set_fiber_fixed_speed(hw,
						    IXGBE_LINK_SPEED_10GB_FULL);
		} else {
			esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
			IXGBE_WRITE_FLUSH(hw);
		}

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link_82599(hw,
						    IXGBE_LINK_SPEED_10GB_FULL,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted.  82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed,
						  &link_up, FALSE);
			if (status != IXGBE_SUCCESS)
				return status;

			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		/* 1G is the highest only if 10G was not also requested */
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
			goto out;

		/* Set the module link speed (see 10G case above) */
		if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) {
			ixgbe_set_fiber_fixed_speed(hw,
						    IXGBE_LINK_SPEED_1GB_FULL);
		} else {
			esdp_reg &= ~IXGBE_ESDP_SDP5;
			esdp_reg |= IXGBE_ESDP_SDP5_DIR;
			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
			IXGBE_WRITE_FLUSH(hw);
		}

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link_82599(hw,
						    IXGBE_LINK_SPEED_1GB_FULL,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if (link_up)
			goto out;
	}

	/*
	 * We didn't get link.  Configure back to the highest speed we tried,
	 * (if there was more than one).  We call ourselves back with just the
	 * single highest speed that the user requested.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
			highest_link_speed, autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}
874 
875 /**
876  *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
877  *  @hw: pointer to hardware structure
878  *  @speed: new link speed
879  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
880  *
881  *  Implements the Intel SmartSpeed algorithm.
882  **/
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
				    ixgbe_link_speed speed,
				    bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 i, j;
	bool link_up = FALSE;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");

	 /* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	if (speed & IXGBE_LINK_SPEED_100_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
	 * autoneg advertisement if link is unable to be established at the
	 * highest negotiated rate.  This can sometimes happen due to integrity
	 * issues with the physical media connection.
	 */

	/* First, try to get link with full advertisement */
	hw->phy.smart_speed_active = FALSE;
	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
		status = ixgbe_setup_mac_link_82599(hw, speed,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			goto out;

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
		 * Table 9 in the AN MAS.
		 */
		for (i = 0; i < 5; i++) {
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed, &link_up,
						  FALSE);
			if (status != IXGBE_SUCCESS)
				goto out;

			if (link_up)
				goto out;
		}
	}

	/*
	 * We didn't get link.  If we advertised KR plus one of KX4/KX
	 * (or BX4/BX), then disable KR and try again.
	 */
	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
		goto out;

	/* Turn SmartSpeed on to disable KR support */
	hw->phy.smart_speed_active = TRUE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * Wait for the controller to acquire link.  600ms will allow for
	 * the AN link_fail_inhibit_timer as well for multiple cycles of
	 * parallel detect, both 10g and 1g. This allows for the maximum
	 * connect attempts as defined in the AN MAS table 73-7.
	 */
	for (i = 0; i < 6; i++) {
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Turn SmartSpeed back off. */
	hw->phy.smart_speed_active = FALSE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);

out:
	/* Warn if SmartSpeed settled at a rate below the maximum requested */
	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
		DEBUGOUT("Smartspeed has downgraded the link speed "
		"from the maximum advertised\n");
	return status;
}
986 
987 /**
988  *  ixgbe_setup_mac_link_82599 - Set MAC link speed
989  *  @hw: pointer to hardware structure
990  *  @speed: new link speed
991  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
992  *
993  *  Set the link speed in the AUTOC register and restarts link.
994  **/
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
			       ixgbe_link_speed speed,
			       bool autoneg_wait_to_complete)
{
	bool autoneg = FALSE;
	s32 status = IXGBE_SUCCESS;
	u32 autoc, pma_pmd_1g, link_mode, start_autoc;
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 orig_autoc = 0;
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 links_reg;
	u32 i;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
	bool got_lock = FALSE;

	DEBUGFUNC("ixgbe_setup_mac_link_82599");

	/* Check to see if speed passed in is supported. */
	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
	if (status)
		goto out;

	/* Drop any requested speeds the hardware does not support */
	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
	}

	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	/*
	 * orig_autoc keeps the EEPROM-default capabilities; autoc is the
	 * working copy we modify.  start_autoc is what the MAC currently
	 * runs with, so we only touch hardware when autoc differs from it.
	 */
	orig_autoc = autoc;
	start_autoc = hw->mac.cached_autoc;
	link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
	pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;

	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
		/* Set KX4/KX/KR support according to speed requested */
		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
				autoc |= IXGBE_AUTOC_KX4_SUPP;
			/* KR stays disabled while SmartSpeed is active */
			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
			    (hw->phy.smart_speed_active == FALSE))
				autoc |= IXGBE_AUTOC_KR_SUPP;
		}
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
		/* Switch from 1G SFI to 10G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
		}
	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
		/* Switch from 10G SFI to 1G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			if (autoneg)
				autoc |= IXGBE_AUTOC_LMS_1G_AN;
			else
				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
		}
	}

	if (autoc != start_autoc) {
		/* Need SW/FW semaphore around AUTOC writes if LESM is on,
		 * likewise reset_pipeline requires us to hold this lock as
		 * it also writes to AUTOC.
		 */
		if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
			status = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
			if (status != IXGBE_SUCCESS) {
				status = IXGBE_ERR_SWFW_SYNC;
				goto out;
			}

			got_lock = TRUE;
		}

		/* Restart link */
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
		hw->mac.cached_autoc = autoc;
		ixgbe_reset_pipeline_82599(hw);

		if (got_lock) {
			hw->mac.ops.release_swfw_sync(hw,
						      IXGBE_GSSR_MAC_CSR_SM);
			got_lock = FALSE;
		}

		/* Only poll for autoneg to complete if specified to do so */
		if (autoneg_wait_to_complete) {
			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
				links_reg = 0; /*Just in case Autoneg time=0*/
				/* Poll LINKS for AN completion, 100ms/step */
				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
					links_reg =
					       IXGBE_READ_REG(hw, IXGBE_LINKS);
					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
						break;
					msec_delay(100);
				}
				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
					status =
						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
					DEBUGOUT("Autoneg did not complete.\n");
				}
			}
		}

		/* Add delay to filter out noises during initial link setup */
		msec_delay(50);
	}

out:
	return status;
}
1126 
1127 /**
1128  *  ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
1129  *  @hw: pointer to hardware structure
1130  *  @speed: new link speed
1131  *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
1132  *
1133  *  Restarts link on PHY and MAC based on settings passed in.
1134  **/
1135 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
1136 					 ixgbe_link_speed speed,
1137 					 bool autoneg_wait_to_complete)
1138 {
1139 	s32 status;
1140 
1141 	DEBUGFUNC("ixgbe_setup_copper_link_82599");
1142 
1143 	/* Setup the PHY according to input speed */
1144 	status = hw->phy.ops.setup_link_speed(hw, speed,
1145 					      autoneg_wait_to_complete);
1146 	/* Set up MAC */
1147 	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
1148 
1149 	return status;
1150 }
1151 
1152 /**
1153  *  ixgbe_reset_hw_82599 - Perform hardware reset
1154  *  @hw: pointer to hardware structure
1155  *
1156  *  Resets the hardware by resetting the transmit and receive units, masks
1157  *  and clears all interrupts, perform a PHY reset, and perform a link (MAC)
1158  *  reset.
1159  **/
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
	ixgbe_link_speed link_speed;
	s32 status;
	u32 ctrl, i, autoc2;
	u32 curr_lms;
	bool link_up = FALSE;

	DEBUGFUNC("ixgbe_reset_hw_82599");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

	/* PHY ops must be identified and initialized prior to reset */

	/* Identify PHY and related function pointers */
	status = hw->phy.ops.init(hw);

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Setup SFP module if there is one present. */
	if (hw->phy.sfp_setup_needed) {
		status = hw->mac.ops.setup_sfp(hw);
		/* one-shot: only run SFP setup on the first reset */
		hw->phy.sfp_setup_needed = FALSE;
	}

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
		hw->phy.ops.reset(hw);

	/*
	 * remember AUTOC link-mode-select from before we reset so it can be
	 * restored afterwards for MNG FW / WoL configurations (see below)
	 */
	if (hw->mac.cached_autoc)
		curr_lms = hw->mac.cached_autoc & IXGBE_AUTOC_LMS_MASK;
	else
		curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) &
					  IXGBE_AUTOC_LMS_MASK;

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
	 * If link reset is used when link is up, it might reset the PHY when
	 * mng is using it.  If link is down or the flag to force full link
	 * reset is set, then perform link reset.
	 */
	ctrl = IXGBE_CTRL_LNK_RST;
	if (!hw->force_full_reset) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
		if (link_up)
			ctrl = IXGBE_CTRL_RST;
	}

	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}

	if (ctrl & IXGBE_CTRL_RST_MASK) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/*
	 * Store the original AUTOC/AUTOC2 values if they have not been
	 * stored off yet.  Otherwise restore the stored original
	 * values since the reset operation sets back to defaults.
	 */
	hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);

	/* Enable link if disabled in NVM */
	if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
		autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		IXGBE_WRITE_FLUSH(hw);
	}

	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = hw->mac.cached_autoc;
		hw->mac.orig_autoc2 = autoc2;
		hw->mac.orig_link_settings_stored = TRUE;
	} else {

		/* If MNG FW is running on a multi-speed device that
		 * doesn't autoneg without driver support we need to
		 * leave LMS in the state it was before we MAC reset.
		 * Likewise if we support WoL we don't want to change the
		 * LMS state.
		 */
		if ((hw->phy.multispeed_fiber && hw->mng_fw_enabled) ||
		    hw->wol_enabled)
			hw->mac.orig_autoc =
				(hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
				curr_lms;

		if (hw->mac.cached_autoc != hw->mac.orig_autoc) {
			/* Need SW/FW semaphore around AUTOC writes if LESM is
			 * on, likewise reset_pipeline requires us to hold
			 * this lock as it also writes to AUTOC.
			 */
			bool got_lock = FALSE;
			if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
				status = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
				if (status != IXGBE_SUCCESS) {
					status = IXGBE_ERR_SWFW_SYNC;
					goto reset_hw_out;
				}

				got_lock = TRUE;
			}

			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
			hw->mac.cached_autoc = hw->mac.orig_autoc;
			ixgbe_reset_pipeline_82599(hw);

			if (got_lock)
				hw->mac.ops.release_swfw_sync(hw,
						      IXGBE_GSSR_MAC_CSR_SM);
		}

		/* Restore only the upper (non-link-disable) bits of AUTOC2 */
		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
			autoc2 |= (hw->mac.orig_autoc2 &
				   IXGBE_AUTOC2_UPPER_MASK);
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		}
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* Save the SAN MAC RAR index */
		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

reset_hw_out:
	return status;
}
1350 
1351 /**
1352  *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1353  *  @hw: pointer to hardware structure
1354  **/
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
	int i;
	/* Snapshot current FDIRCTRL; clear INIT_DONE so it can be re-polled */
	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;

	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");

	/*
	 * Before starting reinitialization process,
	 * FDIRCMD.CMD must be zero.
	 */
	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
		if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
		      IXGBE_FDIRCMD_CMD_MASK))
			break;
		usec_delay(10);
	}
	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
		DEBUGOUT("Flow Director previous command isn't complete, "
			 "aborting table re-initialization.\n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * 82599 adapters flow director init flow cannot be restarted,
	 * Workaround 82599 silicon errata by performing the following steps
	 * before re-writing the FDIRCTRL control register with the same value.
	 * - write 1 to bit 8 of FDIRCMD register &
	 * - write 0 to bit 8 of FDIRCMD register
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * Clear FDIR Hash register to clear any leftover hashes
	 * waiting to be programmed.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
	IXGBE_WRITE_FLUSH(hw);

	/* Re-write the saved control value; this restarts table init */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll init-done after we write FDIRCTRL register */
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
				   IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}
	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	}

	/* Clear FDIR statistics registers (read to clear) */
	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);

	return IXGBE_SUCCESS;
}
1427 
1428 /**
1429  *  ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
1430  *  @hw: pointer to hardware structure
1431  *  @fdirctrl: value to write to flow director control register
1432  **/
1433 static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1434 {
1435 	int i;
1436 
1437 	DEBUGFUNC("ixgbe_fdir_enable_82599");
1438 
1439 	/* Prime the keys for hashing */
1440 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1441 	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
1442 
1443 	/*
1444 	 * Poll init-done after we write the register.  Estimated times:
1445 	 *      10G: PBALLOC = 11b, timing is 60us
1446 	 *       1G: PBALLOC = 11b, timing is 600us
1447 	 *     100M: PBALLOC = 11b, timing is 6ms
1448 	 *
1449 	 *     Multiple these timings by 4 if under full Rx load
1450 	 *
1451 	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1452 	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
1453 	 * this might not finish in our poll time, but we can live with that
1454 	 * for now.
1455 	 */
1456 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1457 	IXGBE_WRITE_FLUSH(hw);
1458 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1459 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1460 				   IXGBE_FDIRCTRL_INIT_DONE)
1461 			break;
1462 		msec_delay(1);
1463 	}
1464 
1465 	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1466 		DEBUGOUT("Flow Director poll time exceeded!\n");
1467 }
1468 
1469 /**
1470  *  ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
1471  *  @hw: pointer to hardware structure
1472  *  @fdirctrl: value to write to flow director control register, initially
1473  *	     contains just the value of the Rx packet buffer allocation
1474  **/
1475 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1476 {
1477 	DEBUGFUNC("ixgbe_init_fdir_signature_82599");
1478 
1479 	/*
1480 	 * Continue setup of fdirctrl register bits:
1481 	 *  Move the flexible bytes to use the ethertype - shift 6 words
1482 	 *  Set the maximum length per hash bucket to 0xA filters
1483 	 *  Send interrupt when 64 filters are left
1484 	 */
1485 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1486 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1487 		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1488 
1489 	/* write hashes and fdirctrl register, poll for completion */
1490 	ixgbe_fdir_enable_82599(hw, fdirctrl);
1491 
1492 	return IXGBE_SUCCESS;
1493 }
1494 
1495 /**
1496  *  ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
1497  *  @hw: pointer to hardware structure
1498  *  @fdirctrl: value to write to flow director control register, initially
1499  *	     contains just the value of the Rx packet buffer allocation
1500  **/
1501 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1502 {
1503 	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
1504 
1505 	/*
1506 	 * Continue setup of fdirctrl register bits:
1507 	 *  Turn perfect match filtering on
1508 	 *  Report hash in RSS field of Rx wb descriptor
1509 	 *  Initialize the drop queue
1510 	 *  Move the flexible bytes to use the ethertype - shift 6 words
1511 	 *  Set the maximum length per hash bucket to 0xA filters
1512 	 *  Send interrupt when 64 (0x4 * 16) filters are left
1513 	 */
1514 	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
1515 		    IXGBE_FDIRCTRL_REPORT_STATUS |
1516 		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
1517 		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1518 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1519 		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1520 
1521 	/* write hashes and fdirctrl register, poll for completion */
1522 	ixgbe_fdir_enable_82599(hw, fdirctrl);
1523 
1524 	return IXGBE_SUCCESS;
1525 }
1526 
1527 /*
1528  * These defines allow us to quickly generate all of the necessary instructions
1529  * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
1530  * for values 0 through 15
1531  */
1532 #define IXGBE_ATR_COMMON_HASH_KEY \
1533 		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
1534 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
1535 do { \
1536 	u32 n = (_n); \
1537 	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
1538 		common_hash ^= lo_hash_dword >> n; \
1539 	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1540 		bucket_hash ^= lo_hash_dword >> n; \
1541 	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
1542 		sig_hash ^= lo_hash_dword << (16 - n); \
1543 	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
1544 		common_hash ^= hi_hash_dword >> n; \
1545 	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1546 		bucket_hash ^= hi_hash_dword >> n; \
1547 	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
1548 		sig_hash ^= hi_hash_dword << (16 - n); \
1549 } while (0);
1550 
1551 /**
1552  *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
 *  @input: unique input dword
 *  @common: compressed common input dword
 *
 *  This function is almost identical to the function above but contains
 *  several optimizations such as unwinding all of the loops, letting the
1557  *  compiler work out all of the conditional ifs since the keys are static
1558  *  defines, and computing two keys at once since the hashed dword stream
1559  *  will be the same for both keys.
1560  **/
u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
				     union ixgbe_atr_hash_dword common)
{
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input.dword);

	/* generate common hash dword */
	hi_hash_dword = IXGBE_NTOHL(common.dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the vlan until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/*
	 * Process remaining 30 bit of the key.  Deliberately unrolled so
	 * the key-bit conditionals inside the macro fold away at compile
	 * time (the keys are static defines).
	 */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);

	/* combine common_hash result with signature and bucket hashes */
	bucket_hash ^= common_hash;
	bucket_hash &= IXGBE_ATR_HASH_MASK;

	sig_hash ^= common_hash << 16;
	sig_hash &= IXGBE_ATR_HASH_MASK << 16;

	/* return completed signature hash: sig in upper 16 bits, bucket in
	 * lower 16 bits, matching the FDIRHASH register layout */
	return sig_hash ^ bucket_hash;
}
1616 
1617 /**
 *  ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
1619  *  @hw: pointer to hardware structure
1620  *  @input: unique input dword
1621  *  @common: compressed common input dword
1622  *  @queue: queue index to direct traffic to
1623  **/
1624 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1625 					  union ixgbe_atr_hash_dword input,
1626 					  union ixgbe_atr_hash_dword common,
1627 					  u8 queue)
1628 {
1629 	u64  fdirhashcmd;
1630 	u32  fdircmd;
1631 
1632 	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
1633 
1634 	/*
1635 	 * Get the flow_type in order to program FDIRCMD properly
1636 	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
1637 	 */
1638 	switch (input.formatted.flow_type) {
1639 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
1640 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
1641 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1642 	case IXGBE_ATR_FLOW_TYPE_TCPV6:
1643 	case IXGBE_ATR_FLOW_TYPE_UDPV6:
1644 	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
1645 		break;
1646 	default:
1647 		DEBUGOUT(" Error on flow type input\n");
1648 		return IXGBE_ERR_CONFIG;
1649 	}
1650 
1651 	/* configure FDIRCMD register */
1652 	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1653 		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1654 	fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1655 	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1656 
1657 	/*
1658 	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1659 	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
1660 	 */
1661 	fdirhashcmd = (u64)fdircmd << 32;
1662 	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
1663 	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1664 
1665 	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
1666 
1667 	return IXGBE_SUCCESS;
1668 }
1669 
1670 #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
1671 do { \
1672 	u32 n = (_n); \
1673 	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1674 		bucket_hash ^= lo_hash_dword >> n; \
1675 	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1676 		bucket_hash ^= hi_hash_dword >> n; \
1677 } while (0);
1678 
1679 /**
1680  *  ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
 *  @input: input bitstream to compute the hash on
 *  @input_mask: mask for the input bitstream
1683  *
1684  *  This function serves two main purposes.  First it applys the input_mask
1685  *  to the atr_input resulting in a cleaned up atr_input data stream.
1686  *  Secondly it computes the hash and stores it in the bkt_hash field at
1687  *  the end of the input byte stream.  This way it will be available for
1688  *  future use without needing to recompute the hash.
1689  **/
1690 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1691 					  union ixgbe_atr_input *input_mask)
1692 {
1693 
1694 	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1695 	u32 bucket_hash = 0;
1696 
1697 	/* Apply masks to input data */
1698 	input->dword_stream[0]  &= input_mask->dword_stream[0];
1699 	input->dword_stream[1]  &= input_mask->dword_stream[1];
1700 	input->dword_stream[2]  &= input_mask->dword_stream[2];
1701 	input->dword_stream[3]  &= input_mask->dword_stream[3];
1702 	input->dword_stream[4]  &= input_mask->dword_stream[4];
1703 	input->dword_stream[5]  &= input_mask->dword_stream[5];
1704 	input->dword_stream[6]  &= input_mask->dword_stream[6];
1705 	input->dword_stream[7]  &= input_mask->dword_stream[7];
1706 	input->dword_stream[8]  &= input_mask->dword_stream[8];
1707 	input->dword_stream[9]  &= input_mask->dword_stream[9];
1708 	input->dword_stream[10] &= input_mask->dword_stream[10];
1709 
1710 	/* record the flow_vm_vlan bits as they are a key part to the hash */
1711 	flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
1712 
1713 	/* generate common hash dword */
1714 	hi_hash_dword = IXGBE_NTOHL(input->dword_stream[1] ^
1715 				    input->dword_stream[2] ^
1716 				    input->dword_stream[3] ^
1717 				    input->dword_stream[4] ^
1718 				    input->dword_stream[5] ^
1719 				    input->dword_stream[6] ^
1720 				    input->dword_stream[7] ^
1721 				    input->dword_stream[8] ^
1722 				    input->dword_stream[9] ^
1723 				    input->dword_stream[10]);
1724 
1725 	/* low dword is word swapped version of common */
1726 	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1727 
1728 	/* apply flow ID/VM pool/VLAN ID bits to hash words */
1729 	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1730 
1731 	/* Process bits 0 and 16 */
1732 	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
1733 
1734 	/*
1735 	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1736 	 * delay this because bit 0 of the stream should not be processed
1737 	 * so we do not add the vlan until after bit 0 was processed
1738 	 */
1739 	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1740 
1741 	/* Process remaining 30 bit of the key */
1742 	IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
1743 	IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
1744 	IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
1745 	IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
1746 	IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
1747 	IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
1748 	IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
1749 	IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
1750 	IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
1751 	IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
1752 	IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
1753 	IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
1754 	IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
1755 	IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
1756 	IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
1757 
1758 	/*
1759 	 * Limit hash to 13 bits since max bucket count is 8K.
1760 	 * Store result at the end of the input stream.
1761 	 */
1762 	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
1763 }
1764 
1765 /**
1766  *  ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
1767  *  @input_mask: mask to be bit swapped
1768  *
1769  *  The source and destination port masks for flow director are bit swapped
1770  *  in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc.  In order to
1771  *  generate a correctly swapped value we need to bit swap the mask and that
1772  *  is what is accomplished by this function.
1773  **/
1774 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1775 {
1776 	u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
1777 	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1778 	mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
1779 	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1780 	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1781 	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1782 	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
1783 }
1784 
1785 /*
1786  * These two macros are meant to address the fact that we have registers
1787  * that are either all or in part big-endian.  As a result on big-endian
1788  * systems we will end up byte swapping the value to little-endian before
1789  * it is byte swapped again and written to the hardware in the original
1790  * big-endian format.
1791  */
/* Byte-swap a 32-bit value.  NOTE: evaluates _value multiple times, so
 * do not pass expressions with side effects. */
#define IXGBE_STORE_AS_BE32(_value) \
	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))

/* Write a register whose contents the hardware treats as big-endian */
#define IXGBE_WRITE_REG_BE32(a, reg, value) \
	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))

/* Byte-swap a 16-bit value.  NOTE: evaluates _value twice. */
#define IXGBE_STORE_AS_BE16(_value) \
	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
1801 
/**
 *  ixgbe_fdir_set_input_mask_82599 - program the Flow Director input masks
 *  @hw: pointer to hardware structure
 *  @input_mask: mask for the input bitstream; for each field, all-ones
 *               means "match on this field", all-zeros means "ignore it"
 *
 *  Validates the mask (partial per-field masks are rejected) and programs
 *  FDIRM, FDIRTCPM/FDIRUDPM and the IPv4 src/dst mask registers.
 *  Returns IXGBE_ERR_CONFIG on an unsupported partial mask.
 **/
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask)
{
	/* mask IPv6 since it is currently not supported */
	u32 fdirm = IXGBE_FDIRM_DIPv6;
	u32 fdirtcpm;

	DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");

	/*
	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
	 * are zero, then assume a full mask for that field.  Also assume that
	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
	 * cannot be masked out in this implementation.
	 *
	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
	 * point in time.
	 */

	/* verify bucket hash is cleared on hash generation */
	if (input_mask->formatted.bkt_hash)
		DEBUGOUT(" bucket hash should always be 0 in mask\n");

	/* Program FDIRM and verify partial masks */
	switch (input_mask->formatted.vm_pool & 0x7F) {
	case 0x0:
		/* pool field fully masked out */
		fdirm |= IXGBE_FDIRM_POOL;
		/* fall through */
	case 0x7F:
		/* pool field fully matched */
		break;
	default:
		/* partial pool masks are not supported by the hardware */
		DEBUGOUT(" Error on vm pool mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
	case 0x0:
		/* L4 type masked out; ports must then be fully masked too */
		fdirm |= IXGBE_FDIRM_L4P;
		if (input_mask->formatted.dst_port ||
		    input_mask->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port mask\n");
			return IXGBE_ERR_CONFIG;
		}
		/* fall through */
	case IXGBE_ATR_L4TYPE_MASK:
		break;
	default:
		DEBUGOUT(" Error on flow type mask\n");
		return IXGBE_ERR_CONFIG;
	}

	/* 0xEFFF selects the 12 VLAN ID bits plus the 3 priority bits */
	switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
	case 0x0000:
		/* mask VLAN ID, fall through to mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fall through */
	case 0x0FFF:
		/* mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANP;
		break;
	case 0xE000:
		/* mask VLAN ID only, fall through */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fall through */
	case 0xEFFF:
		/* no VLAN fields masked */
		break;
	default:
		/* partial VLAN masks are not supported */
		DEBUGOUT(" Error on VLAN mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
	case 0x0000:
		/* Mask Flex Bytes, fall through */
		fdirm |= IXGBE_FDIRM_FLEX;
		/* fall through */
	case 0xFFFF:
		break;
	default:
		DEBUGOUT(" Error on flexible byte mask\n");
		return IXGBE_ERR_CONFIG;
	}

	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	/* store the TCP/UDP port masks, bit reversed from port layout */
	fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);

	/* write both the same so that UDP and TCP use the same mask */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);

	/* store source and destination IP masks (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
			     ~input_mask->formatted.src_ip[0]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
			     ~input_mask->formatted.dst_ip[0]);

	return IXGBE_SUCCESS;
}
1899 
/**
 *  ixgbe_fdir_write_perfect_filter_82599 - write a perfect filter to HW
 *  @hw: pointer to hardware structure
 *  @input: input bitstream containing the filter field values
 *  @soft_id: software index used to identify the filter later
 *  @queue: Rx queue index to direct matching traffic to
 *
 *  Programs the Flow Director filter registers and issues the add-flow
 *  command.  The register sequence is order sensitive and ends with a
 *  flush before FDIRCMD; the caller must serialize filter updates.
 **/
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue)
{
	u32 fdirport, fdirvlan, fdirhash, fdircmd;

	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");

	/* currently IPv6 is not supported, must be programmed with 0 */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
			     input->formatted.src_ip[0]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
			     input->formatted.src_ip[1]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
			     input->formatted.src_ip[2]);

	/* record the source address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);

	/* record the first 32 bits of the destination address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);

	/* record source and destination port (little-endian)*/
	fdirport = IXGBE_NTOHS(input->formatted.dst_port);
	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
	fdirport |= IXGBE_NTOHS(input->formatted.src_port);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);

	/* record vlan (little-endian) and flex_bytes(big-endian) */
	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	/* configure FDIRHASH register: bucket hash plus software index */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	if (queue == IXGBE_FDIR_DROP_QUEUE)
		fdircmd |= IXGBE_FDIRCMD_DROP;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

	/* issuing this write starts the filter add in hardware */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

	return IXGBE_SUCCESS;
}
1958 
/**
 *  ixgbe_fdir_erase_perfect_filter_82599 - remove a perfect filter
 *  @hw: pointer to hardware structure
 *  @input: input bitstream; only the bucket hash is used here
 *  @soft_id: software index of the filter to remove
 *
 *  Issues a query command for the filter and, if it is present in
 *  hardware, follows with a remove-flow command.  Returns
 *  IXGBE_ERR_FDIR_REINIT_FAILED if the query does not complete within
 *  the retry budget (10 polls of 10us each).
 **/
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id)
{
	u32 fdirhash;
	u32 fdircmd = 0;
	u32 retry_count;
	s32 err = IXGBE_SUCCESS;

	/* configure FDIRHASH register: bucket hash plus software index */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/* flush hash to HW */
	IXGBE_WRITE_FLUSH(hw);

	/* Query if filter is present */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);

	for (retry_count = 10; retry_count; retry_count--) {
		/* allow 10us for query to process */
		usec_delay(10);
		/* verify query completed successfully */
		fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
		/* command field clears to zero when the query is done */
		if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
			break;
	}

	if (!retry_count)
		err = IXGBE_ERR_FDIR_REINIT_FAILED;

	/* if filter exists in hardware then remove it */
	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
		IXGBE_WRITE_FLUSH(hw);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
	}

	return err;
}
2001 
/**
 *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
 *  @hw: pointer to hardware structure
 *  @input: input bitstream
 *  @input_mask: mask for the input bitstream; flow_type mask is
 *               overwritten here based on the input's flow type
 *  @soft_id: software index for the filters
 *  @queue: queue index to direct traffic to
 *
 *  Note that the caller to this function must lock before calling, since the
 *  hardware writes must be protected from one another.
 **/
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
					union ixgbe_atr_input *input,
					union ixgbe_atr_input *input_mask,
					u16 soft_id, u8 queue)
{
	s32 err = IXGBE_ERR_CONFIG;

	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");

	/*
	 * Check flow_type formatting, and bail out before we touch the hardware
	 * if there's a configuration issue
	 */
	switch (input->formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_IPV4:
		/* no L4 protocol: match flow type only, ports must be zero */
		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
		if (input->formatted.dst_port || input->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port\n");
			return IXGBE_ERR_CONFIG;
		}
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
		/* SCTP has no port matching; ports must be zero */
		if (input->formatted.dst_port || input->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port\n");
			return IXGBE_ERR_CONFIG;
		}
		/* fall through - shares the flow_type mask below */
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
						  IXGBE_ATR_L4TYPE_MASK;
		break;
	default:
		DEBUGOUT(" Error on flow type input\n");
		return err;
	}

	/* program input mask into the HW */
	err = ixgbe_fdir_set_input_mask_82599(hw, input_mask);
	if (err)
		return err;

	/* apply mask and compute/store hash */
	ixgbe_atr_compute_perfect_hash_82599(input, input_mask);

	/* program filters to filter memory */
	return ixgbe_fdir_write_perfect_filter_82599(hw, input,
						     soft_id, queue);
}
2061 
/**
 *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
 *  @hw: pointer to hardware structure
 *  @reg: analog register to read
 *  @val: read value
 *
 *  Performs read operation to Omer analog register specified.
 **/
s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32  core_ctl;

	DEBUGFUNC("ixgbe_read_analog_reg8_82599");

	/*
	 * Latch the register address (bits 15:8) into CORECTL.
	 * NOTE(review): the WRITE command bit is set even though this is a
	 * read access; this matches the upstream shared code - confirm
	 * against the 82599 datasheet CORECTL description.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
			(reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);		/* allow the analog access to complete */
	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
	/* register data comes back in the low byte of CORECTL */
	*val = (u8)core_ctl;

	return IXGBE_SUCCESS;
}
2085 
2086 /**
2087  *  ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
2088  *  @hw: pointer to hardware structure
2089  *  @reg: atlas register to write
2090  *  @val: value to write
2091  *
2092  *  Performs write operation to Omer analog register specified.
2093  **/
2094 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
2095 {
2096 	u32  core_ctl;
2097 
2098 	DEBUGFUNC("ixgbe_write_analog_reg8_82599");
2099 
2100 	core_ctl = (reg << 8) | val;
2101 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
2102 	IXGBE_WRITE_FLUSH(hw);
2103 	usec_delay(10);
2104 
2105 	return IXGBE_SUCCESS;
2106 }
2107 
2108 /**
2109  *  ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
2110  *  @hw: pointer to hardware structure
2111  *
2112  *  Starts the hardware using the generic start_hw function
2113  *  and the generation start_hw function.
2114  *  Then performs revision-specific operations, if any.
2115  **/
2116 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
2117 {
2118 	s32 ret_val = IXGBE_SUCCESS;
2119 
2120 	DEBUGFUNC("ixgbe_start_hw_82599");
2121 
2122 	ret_val = ixgbe_start_hw_generic(hw);
2123 	if (ret_val != IXGBE_SUCCESS)
2124 		goto out;
2125 
2126 	ret_val = ixgbe_start_hw_gen2(hw);
2127 	if (ret_val != IXGBE_SUCCESS)
2128 		goto out;
2129 
2130 	/* We need to run link autotry after the driver loads */
2131 	hw->mac.autotry_restart = TRUE;
2132 
2133 	if (ret_val == IXGBE_SUCCESS)
2134 		ret_val = ixgbe_verify_fw_version_82599(hw);
2135 out:
2136 	return ret_val;
2137 }
2138 
2139 /**
2140  *  ixgbe_identify_phy_82599 - Get physical layer module
2141  *  @hw: pointer to hardware structure
2142  *
2143  *  Determines the physical layer module found on the current adapter.
2144  *  If PHY already detected, maintains current PHY type in hw struct,
2145  *  otherwise executes the PHY detection routine.
2146  **/
2147 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
2148 {
2149 	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
2150 
2151 	DEBUGFUNC("ixgbe_identify_phy_82599");
2152 
2153 	/* Detect PHY if not unknown - returns success if already detected. */
2154 	status = ixgbe_identify_phy_generic(hw);
2155 	if (status != IXGBE_SUCCESS) {
2156 		/* 82599 10GBASE-T requires an external PHY */
2157 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
2158 			goto out;
2159 		else
2160 			status = ixgbe_identify_module_generic(hw);
2161 	}
2162 
2163 	/* Set PHY type none if no PHY detected */
2164 	if (hw->phy.type == ixgbe_phy_unknown) {
2165 		hw->phy.type = ixgbe_phy_none;
2166 		status = IXGBE_SUCCESS;
2167 	}
2168 
2169 	/* Return error if SFP module has been detected but is not supported */
2170 	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
2171 		status = IXGBE_ERR_SFP_NOT_SUPPORTED;
2172 
2173 out:
2174 	return status;
2175 }
2176 
/**
 *  ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
 *  @hw: pointer to hardware structure
 *
 *  Determines physical layer capabilities of the current configuration,
 *  first from the PHY type (copper), then from the AUTOC/AUTOC2 link mode
 *  fields, and finally - for SFI modes - from the SFP module EEPROM.
 **/
u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
{
	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u16 ext_ability = 0;
	u8 comp_codes_10g = 0;
	u8 comp_codes_1g = 0;

	DEBUGFUNC("ixgbe_get_support_physical_layer_82599");

	hw->phy.ops.identify(hw);

	/* copper PHYs report their abilities directly over MDIO */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_cu_unknown:
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
		goto out;
	default:
		/* non-copper: fall back to AUTOC link mode decoding below */
		break;
	}

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_AN:
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
			    IXGBE_PHYSICAL_LAYER_1000BASE_BX;
			goto out;
		} else
			/* SFI mode so read SFP module */
			goto sfp_check;
		break;		/* not reached; retained from shared code */
	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
		goto out;
		break;		/* not reached; retained from shared code */
	case IXGBE_AUTOC_LMS_10G_SERIAL:
		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
			goto out;
		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
			goto sfp_check;
		break;
	case IXGBE_AUTOC_LMS_KX4_KX_KR:
	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
		/* backplane modes: capabilities are OR'ed together */
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
		goto out;
		break;		/* not reached; retained from shared code */
	default:
		goto out;
		break;		/* not reached; retained from shared code */
	}

sfp_check:
	/* SFP check must be done last since DA modules are sometimes used to
	 * test KR mode -  we need to id KR mode correctly before SFP module.
	 * Call identify_sfp because the pluggable module may have changed */
	hw->phy.ops.identify_sfp(hw);
	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
		goto out;

	switch (hw->phy.type) {
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
		break;
	case ixgbe_phy_sfp_ftl_active:
	case ixgbe_phy_sfp_active_unknown:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
		break;
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
		/* optical module: read compliance codes from the SFP EEPROM */
		hw->phy.ops.read_i2c_eeprom(hw,
		      IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
		hw->phy.ops.read_i2c_eeprom(hw,
		      IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
		else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
		else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
		break;
	default:
		break;
	}

out:
	return physical_layer;
}
2298 
/**
 *  ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
 *  @hw: pointer to hardware structure
 *  @regval: register value to write to RXCTRL
 *
 *  Enables the Rx DMA unit for 82599.  Always returns IXGBE_SUCCESS.
 **/
s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
{

	DEBUGFUNC("ixgbe_enable_rx_dma_82599");

	/*
	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
	 * If traffic is incoming before we enable the Rx unit, it could hang
	 * the Rx DMA unit.  Therefore, make sure the security engine is
	 * completely disabled prior to enabling the Rx unit.
	 */

	hw->mac.ops.disable_sec_rx_path(hw);

	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);

	/* re-enable the security engine once RXCTRL is programmed */
	hw->mac.ops.enable_sec_rx_path(hw);

	return IXGBE_SUCCESS;
}
2326 
/**
 *  ixgbe_verify_fw_version_82599 - verify fw version for 82599
 *  @hw: pointer to hardware structure
 *
 *  Verifies that installed the firmware version is 0.6 or higher
 *  for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
 *
 *  Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
 *  if the FW version is not supported.
 **/
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM_VERSION;
	u16 fw_offset, fw_ptp_cfg_offset;
	u16 fw_version;

	DEBUGFUNC("ixgbe_verify_fw_version_82599");

	/* firmware check is only necessary for SFI devices */
	if (hw->phy.media_type != ixgbe_media_type_fiber) {
		status = IXGBE_SUCCESS;
		goto fw_version_out;
	}

	/* get the offset to the Firmware Module block */
	if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
			      "eeprom read at offset %d failed", IXGBE_FW_PTR);
		return IXGBE_ERR_EEPROM_VERSION;
	}

	/* 0 / 0xFFFF mean the pointer was never programmed (erased EEPROM) */
	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
		goto fw_version_out;

	/* get the offset to the Pass Through Patch Configuration block */
	if (hw->eeprom.ops.read(hw, (fw_offset +
				 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
				 &fw_ptp_cfg_offset)) {
		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
			      "eeprom read at offset %d failed",
			      fw_offset +
			      IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR);
		return IXGBE_ERR_EEPROM_VERSION;
	}

	/* again, 0 / 0xFFFF indicate an unprogrammed pointer */
	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
		goto fw_version_out;

	/* get the firmware version */
	if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
			    IXGBE_FW_PATCH_VERSION_4), &fw_version)) {
		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
			      "eeprom read at offset %d failed",
			      fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4);
		return IXGBE_ERR_EEPROM_VERSION;
	}

	/* version 0.6 and later is acceptable */
	if (fw_version > 0x5)
		status = IXGBE_SUCCESS;

fw_version_out:
	return status;
}
2390 
/**
 *  ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
 *  @hw: pointer to hardware structure
 *
 *  Returns TRUE if the LESM FW module is present and enabled. Otherwise
 *  returns FALSE. Smart Speed must be disabled if LESM FW module is enabled.
 **/
bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
{
	bool lesm_enabled = FALSE;
	u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
	s32 status;

	DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");

	/* get the offset to the Firmware Module block */
	status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);

	/* 0 / 0xFFFF mean the pointer was never programmed; treat as absent */
	if ((status != IXGBE_SUCCESS) ||
	    (fw_offset == 0) || (fw_offset == 0xFFFF))
		goto out;

	/* get the offset to the LESM Parameters block */
	status = hw->eeprom.ops.read(hw, (fw_offset +
				     IXGBE_FW_LESM_PARAMETERS_PTR),
				     &fw_lesm_param_offset);

	if ((status != IXGBE_SUCCESS) ||
	    (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
		goto out;

	/* get the lesm state word */
	status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
				     IXGBE_FW_LESM_STATE_1),
				     &fw_lesm_state);

	/* only report enabled when the state word was read successfully */
	if ((status == IXGBE_SUCCESS) &&
	    (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
		lesm_enabled = TRUE;

out:
	return lesm_enabled;
}
2434 
2435 /**
2436  *  ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
2437  *  fastest available method
2438  *
2439  *  @hw: pointer to hardware structure
2440  *  @offset: offset of  word in EEPROM to read
2441  *  @words: number of words
2442  *  @data: word(s) read from the EEPROM
2443  *
2444  *  Retrieves 16 bit word(s) read from EEPROM
2445  **/
2446 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
2447 					  u16 words, u16 *data)
2448 {
2449 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2450 	s32 ret_val = IXGBE_ERR_CONFIG;
2451 
2452 	DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
2453 
2454 	/*
2455 	 * If EEPROM is detected and can be addressed using 14 bits,
2456 	 * use EERD otherwise use bit bang
2457 	 */
2458 	if ((eeprom->type == ixgbe_eeprom_spi) &&
2459 	    (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
2460 		ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
2461 							 data);
2462 	else
2463 		ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
2464 								    words,
2465 								    data);
2466 
2467 	return ret_val;
2468 }
2469 
2470 /**
2471  *  ixgbe_read_eeprom_82599 - Read EEPROM word using
2472  *  fastest available method
2473  *
2474  *  @hw: pointer to hardware structure
2475  *  @offset: offset of  word in the EEPROM to read
2476  *  @data: word read from the EEPROM
2477  *
2478  *  Reads a 16 bit word from the EEPROM
2479  **/
2480 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2481 				   u16 offset, u16 *data)
2482 {
2483 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2484 	s32 ret_val = IXGBE_ERR_CONFIG;
2485 
2486 	DEBUGFUNC("ixgbe_read_eeprom_82599");
2487 
2488 	/*
2489 	 * If EEPROM is detected and can be addressed using 14 bits,
2490 	 * use EERD otherwise use bit bang
2491 	 */
2492 	if ((eeprom->type == ixgbe_eeprom_spi) &&
2493 	    (offset <= IXGBE_EERD_MAX_ADDR))
2494 		ret_val = ixgbe_read_eerd_generic(hw, offset, data);
2495 	else
2496 		ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
2497 
2498 	return ret_val;
2499 }
2500 
/**
 * ixgbe_reset_pipeline_82599 - perform pipeline reset
 *
 *  @hw: pointer to hardware structure
 *
 * Reset pipeline by asserting Restart_AN together with LMS change to ensure
 * full pipeline reset
 **/
s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
{
	s32 ret_val;
	u32 anlp1_reg = 0;
	u32 i, autoc_reg, autoc2_reg;

	/* Enable link if disabled in NVM */
	autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
		autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
		IXGBE_WRITE_FLUSH(hw);
	}

	/*
	 * NOTE(review): assumes hw->mac.cached_autoc holds the current AUTOC
	 * value maintained by the callers - confirm against the setup paths.
	 */
	autoc_reg = hw->mac.cached_autoc;
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN);
	/* Wait for AN to leave state 0 (up to 10 polls of 4ms each) */
	for (i = 0; i < 10; i++) {
		msec_delay(4);
		anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
		if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
			break;
	}

	if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
		DEBUGOUT("auto negotiation not completed\n");
		ret_val = IXGBE_ERR_RESET_FAILED;
		goto reset_pipeline_out;
	}

	ret_val = IXGBE_SUCCESS;

reset_pipeline_out:
	/* Write AUTOC register with original LMS field and Restart_AN */
	/* AUTOC is restored on both the success and the failure path */
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
	IXGBE_WRITE_FLUSH(hw);

	return ret_val;
}
2550 
2551 
2552 
2553