xref: /freebsd/sys/dev/e1000/e1000_82575.c (revision aa0a1e58)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2010, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 /*
36  * 82575EB Gigabit Network Connection
37  * 82575EB Gigabit Backplane Connection
38  * 82575GB Gigabit Network Connection
39  * 82576 Gigabit Network Connection
40  * 82576 Quad Port Gigabit Mezzanine Adapter
41  */
42 
43 #include "e1000_api.h"
44 
45 static s32  e1000_init_phy_params_82575(struct e1000_hw *hw);
46 static s32  e1000_init_mac_params_82575(struct e1000_hw *hw);
47 static s32  e1000_acquire_phy_82575(struct e1000_hw *hw);
48 static void e1000_release_phy_82575(struct e1000_hw *hw);
49 static s32  e1000_acquire_nvm_82575(struct e1000_hw *hw);
50 static void e1000_release_nvm_82575(struct e1000_hw *hw);
51 static s32  e1000_check_for_link_82575(struct e1000_hw *hw);
52 static s32  e1000_get_cfg_done_82575(struct e1000_hw *hw);
53 static s32  e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
54                                          u16 *duplex);
55 static s32  e1000_init_hw_82575(struct e1000_hw *hw);
56 static s32  e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw);
57 static s32  e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
58                                            u16 *data);
59 static s32  e1000_reset_hw_82575(struct e1000_hw *hw);
60 static s32  e1000_reset_hw_82580(struct e1000_hw *hw);
61 static s32  e1000_read_phy_reg_82580(struct e1000_hw *hw,
62                                     u32 offset, u16 *data);
63 static s32  e1000_write_phy_reg_82580(struct e1000_hw *hw,
64                                      u32 offset, u16 data);
65 static s32  e1000_set_d0_lplu_state_82580(struct e1000_hw *hw,
66                                           bool active);
67 static s32  e1000_set_d3_lplu_state_82580(struct e1000_hw *hw,
68                                           bool active);
69 static s32  e1000_set_d0_lplu_state_82575(struct e1000_hw *hw,
70                                           bool active);
71 static s32  e1000_setup_copper_link_82575(struct e1000_hw *hw);
72 static s32  e1000_setup_serdes_link_82575(struct e1000_hw *hw);
73 static s32  e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data);
74 static s32  e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw,
75                                             u32 offset, u16 data);
76 static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw);
77 static s32  e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
78 static s32  e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
79                                                  u16 *speed, u16 *duplex);
80 static s32  e1000_get_phy_id_82575(struct e1000_hw *hw);
81 static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
82 static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
83 static s32  e1000_reset_init_script_82575(struct e1000_hw *hw);
84 static s32  e1000_read_mac_addr_82575(struct e1000_hw *hw);
85 static void e1000_config_collision_dist_82575(struct e1000_hw *hw);
86 static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
87 static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw);
88 static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw);
89 static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
90 static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw);
91 static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw);
92 static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw);
93 static s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw,
94 						u16 offset);
95 static s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
96 						u16 offset);
97 static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw);
98 static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw);
99 
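/*
 * The values below appear to be receive packet buffer sizes in KB, indexed
 * by the size encoding used in the 82580 RXPBS register.
 */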
100 static const u16 e1000_82580_rxpbs_table[] =
101 	{ 36, 72, 144, 1, 2, 4, 8, 16,
102 	  35, 70, 140 };
103 #define E1000_82580_RXPBS_TABLE_SIZE \
104 	(sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
105 
106 
107 /**
108  *  e1000_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
109  *  @hw: pointer to the HW structure
110  *
111  *  Called to determine if the I2C pins are being used for I2C or as an
112  *  external MDIO interface since the two options are mutually exclusive.
113  **/
114 static bool e1000_sgmii_uses_mdio_82575(struct e1000_hw *hw)
115 {
116 	u32 reg = 0;
117 	bool ext_mdio = FALSE;
118 
119 	DEBUGFUNC("e1000_sgmii_uses_mdio_82575");
120 
121 	switch (hw->mac.type) {
122 	case e1000_82575:
123 	case e1000_82576:
124 		reg = E1000_READ_REG(hw, E1000_MDIC);
125 		ext_mdio = !!(reg & E1000_MDIC_DEST);
126 		break;
127 	case e1000_82580:
128 	case e1000_i350:
129 		reg = E1000_READ_REG(hw, E1000_MDICNFG);
130 		ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
131 		break;
132 	default:
133 		break;
134 	}
135 	return ext_mdio;
136 }
137 
138 /**
139  *  e1000_init_phy_params_82575 - Init PHY func ptrs.
140  *  @hw: pointer to the HW structure
141  **/
142 static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
143 {
144 	struct e1000_phy_info *phy = &hw->phy;
145 	s32 ret_val = E1000_SUCCESS;
146 	u32 ctrl_ext;
147 
148 	DEBUGFUNC("e1000_init_phy_params_82575");
149 
150 	if (hw->phy.media_type != e1000_media_type_copper) {
151 		phy->type = e1000_phy_none;
152 		goto out;
153 	}
154 
155 	phy->ops.power_up   = e1000_power_up_phy_copper;
156 	phy->ops.power_down = e1000_power_down_phy_copper_82575;
157 
158 	phy->autoneg_mask           = AUTONEG_ADVERTISE_SPEED_DEFAULT;
159 	phy->reset_delay_us         = 100;
160 
161 	phy->ops.acquire            = e1000_acquire_phy_82575;
162 	phy->ops.check_reset_block  = e1000_check_reset_block_generic;
163 	phy->ops.commit             = e1000_phy_sw_reset_generic;
164 	phy->ops.get_cfg_done       = e1000_get_cfg_done_82575;
165 	phy->ops.release            = e1000_release_phy_82575;
166 
167 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
168 
169 	if (e1000_sgmii_active_82575(hw)) {
170 		phy->ops.reset      = e1000_phy_hw_reset_sgmii_82575;
171 		ctrl_ext |= E1000_CTRL_I2C_ENA;
172 	} else {
173 		phy->ops.reset      = e1000_phy_hw_reset_generic;
174 		ctrl_ext &= ~E1000_CTRL_I2C_ENA;
175 	}
176 
177 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
178 	e1000_reset_mdicnfg_82580(hw);
179 
180 	if (e1000_sgmii_active_82575(hw) && !e1000_sgmii_uses_mdio_82575(hw)) {
181 		phy->ops.read_reg   = e1000_read_phy_reg_sgmii_82575;
182 		phy->ops.write_reg  = e1000_write_phy_reg_sgmii_82575;
183 	} else if (hw->mac.type >= e1000_82580) {
184 		phy->ops.read_reg   = e1000_read_phy_reg_82580;
185 		phy->ops.write_reg  = e1000_write_phy_reg_82580;
186 	} else {
187 		phy->ops.read_reg   = e1000_read_phy_reg_igp;
188 		phy->ops.write_reg  = e1000_write_phy_reg_igp;
189 	}
190 
191 	/* Set phy->addr and phy->id. */
192 	ret_val = e1000_get_phy_id_82575(hw);
193 
194 	/* Verify phy id and set remaining function pointers */
195 	switch (phy->id) {
196 	case I347AT4_E_PHY_ID:
197 	case M88E1112_E_PHY_ID:
198 	case M88E1340M_E_PHY_ID:
199 	case M88E1111_I_PHY_ID:
200 		phy->type                   = e1000_phy_m88;
201 		phy->ops.check_polarity     = e1000_check_polarity_m88;
202 		phy->ops.get_info           = e1000_get_phy_info_m88;
203 		if (phy->id == I347AT4_E_PHY_ID ||
204 		    phy->id == M88E1112_E_PHY_ID ||
205 		    phy->id == M88E1340M_E_PHY_ID)
206 			phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2;
207 		else
208 			phy->ops.get_cable_length = e1000_get_cable_length_m88;
209 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
210 		break;
211 	case IGP03E1000_E_PHY_ID:
212 	case IGP04E1000_E_PHY_ID:
213 		phy->type                   = e1000_phy_igp_3;
214 		phy->ops.check_polarity     = e1000_check_polarity_igp;
215 		phy->ops.get_info           = e1000_get_phy_info_igp;
216 		phy->ops.get_cable_length   = e1000_get_cable_length_igp_2;
217 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
218 		phy->ops.set_d0_lplu_state  = e1000_set_d0_lplu_state_82575;
219 		phy->ops.set_d3_lplu_state  = e1000_set_d3_lplu_state_generic;
220 		break;
221 	case I82580_I_PHY_ID:
222 	case I350_I_PHY_ID:
223 		phy->type                   = e1000_phy_82580;
224 		phy->ops.check_polarity     = e1000_check_polarity_82577;
225 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_82577;
226 		phy->ops.get_cable_length   = e1000_get_cable_length_82577;
227 		phy->ops.get_info           = e1000_get_phy_info_82577;
228 		phy->ops.set_d0_lplu_state  = e1000_set_d0_lplu_state_82580;
229 		phy->ops.set_d3_lplu_state  = e1000_set_d3_lplu_state_82580;
230 		break;
231 	default:
232 		ret_val = -E1000_ERR_PHY;
233 		goto out;
234 	}
235 
236 out:
237 	return ret_val;
238 }
239 
240 /**
241  *  e1000_init_nvm_params_82575 - Init NVM func ptrs.
242  *  @hw: pointer to the HW structure
243  **/
244 s32 e1000_init_nvm_params_82575(struct e1000_hw *hw)
245 {
246 	struct e1000_nvm_info *nvm = &hw->nvm;
247 	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
248 	u16 size;
249 
250 	DEBUGFUNC("e1000_init_nvm_params_82575");
251 
252 	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
253 	             E1000_EECD_SIZE_EX_SHIFT);
254 	/*
255 	 * Adding the base-shift constant to "size" gives the left-shift
256 	 * value used to compute word_size.
257 	 */
258 	size += NVM_WORD_SIZE_BASE_SHIFT;
259 
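	/*
	 * For example, an EECD size field of 1 gives
	 * word_size = 1 << (1 + NVM_WORD_SIZE_BASE_SHIFT), i.e. 128 words
	 * if the base shift is 6 as on related parts.
	 */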
260 	nvm->word_size = 1 << size;
261 	nvm->opcode_bits        = 8;
262 	nvm->delay_usec         = 1;
263 	switch (nvm->override) {
264 	case e1000_nvm_override_spi_large:
265 		nvm->page_size    = 32;
266 		nvm->address_bits = 16;
267 		break;
268 	case e1000_nvm_override_spi_small:
269 		nvm->page_size    = 8;
270 		nvm->address_bits = 8;
271 		break;
272 	default:
273 		nvm->page_size    = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
274 		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
275 		break;
276 	}
277 
278 	nvm->type = e1000_nvm_eeprom_spi;
279 
280 	if (nvm->word_size == (1 << 15))
281 		nvm->page_size = 128;
282 
283 	/* Function Pointers */
284 	nvm->ops.acquire    = e1000_acquire_nvm_82575;
285 	nvm->ops.release    = e1000_release_nvm_82575;
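	/*
	 * EERD register reads are used for the smaller parts; the largest
	 * (32K-word) devices are presumably beyond what EERD can address,
	 * so they are read through the SPI interface directly.
	 */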
286 	if (nvm->word_size < (1 << 15))
287 		nvm->ops.read    = e1000_read_nvm_eerd;
288 	else
289 		nvm->ops.read    = e1000_read_nvm_spi;
290 
291 	nvm->ops.write              = e1000_write_nvm_spi;
292 	nvm->ops.validate           = e1000_validate_nvm_checksum_generic;
293 	nvm->ops.update             = e1000_update_nvm_checksum_generic;
294 	nvm->ops.valid_led_default  = e1000_valid_led_default_82575;
295 
296 	/* override generic family function pointers for specific descendants */
297 	switch (hw->mac.type) {
298 	case e1000_82580:
299 		nvm->ops.validate = e1000_validate_nvm_checksum_82580;
300 		nvm->ops.update = e1000_update_nvm_checksum_82580;
301 		break;
302 	case e1000_i350:
303 		nvm->ops.validate = e1000_validate_nvm_checksum_i350;
304 		nvm->ops.update = e1000_update_nvm_checksum_i350;
305 		break;
306 	default:
307 		break;
308 	}
309 
310 	return E1000_SUCCESS;
311 }
312 
313 /**
314  *  e1000_init_mac_params_82575 - Init MAC func ptrs.
315  *  @hw: pointer to the HW structure
316  **/
317 static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
318 {
319 	struct e1000_mac_info *mac = &hw->mac;
320 	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
321 	u32 ctrl_ext = 0;
322 
323 	DEBUGFUNC("e1000_init_mac_params_82575");
324 
325 	/* Set media type */
326 	/*
327 	 * The 82575 uses bits 22:23 for link mode. The mode can be changed
328 	 * based on the EEPROM. We cannot rely upon device ID. There
329 	 * is no distinguishable difference between fiber and internal
330 	 * SerDes mode on the 82575. There can be an external PHY attached
331 	 * on the SGMII interface. For this, we'll set sgmii_active to TRUE.
332 	 */
333 	hw->phy.media_type = e1000_media_type_copper;
334 	dev_spec->sgmii_active = FALSE;
335 
336 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
337 	switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
338 	case E1000_CTRL_EXT_LINK_MODE_SGMII:
339 		dev_spec->sgmii_active = TRUE;
340 		break;
341 	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
342 	case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
343 		hw->phy.media_type = e1000_media_type_internal_serdes;
344 		break;
345 	default:
346 		break;
347 	}
348 
349 	/* Set mta register count */
350 	mac->mta_reg_count = 128;
351 	/* Set uta register count */
352 	mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128;
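	/* The 82575 itself has no Unicast Table Array, hence the zero count. */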
353 	/* Set rar entry count */
354 	mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
355 	if (mac->type == e1000_82576)
356 		mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
357 	if (mac->type == e1000_82580)
358 		mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
359 	if (mac->type == e1000_i350) {
360 		mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
361 		/* Enable EEE default settings for i350 */
362 		dev_spec->eee_disable = FALSE;
363 	}
364 
365 	/* Set if part includes ASF firmware */
366 	mac->asf_firmware_present = TRUE;
367 	/* FWSM register */
368 	mac->has_fwsm = TRUE;
369 	/* ARC supported; valid only if manageability features are enabled. */
370 	mac->arc_subsystem_valid =
371 	        (E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK)
372 	                ? TRUE : FALSE;
373 
374 	/* Function pointers */
375 
376 	/* bus type/speed/width */
377 	mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
378 	/* reset */
379 	if (mac->type >= e1000_82580)
380 		mac->ops.reset_hw = e1000_reset_hw_82580;
381 	else
382 		mac->ops.reset_hw = e1000_reset_hw_82575;
383 	/* hw initialization */
384 	mac->ops.init_hw = e1000_init_hw_82575;
385 	/* link setup */
386 	mac->ops.setup_link = e1000_setup_link_generic;
387 	/* physical interface link setup */
388 	mac->ops.setup_physical_interface =
389 	        (hw->phy.media_type == e1000_media_type_copper)
390 	                ? e1000_setup_copper_link_82575
391 	                : e1000_setup_serdes_link_82575;
392 	/* physical interface shutdown */
393 	mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575;
394 	/* physical interface power up */
395 	mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575;
396 	/* check for link */
397 	mac->ops.check_for_link = e1000_check_for_link_82575;
398 	/* receive address register setting */
399 	mac->ops.rar_set = e1000_rar_set_generic;
400 	/* read mac address */
401 	mac->ops.read_mac_addr = e1000_read_mac_addr_82575;
402 	/* configure collision distance */
403 	mac->ops.config_collision_dist = e1000_config_collision_dist_82575;
404 	/* multicast address update */
405 	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
406 	/* writing VFTA */
407 	mac->ops.write_vfta = e1000_write_vfta_generic;
408 	/* clearing VFTA */
409 	mac->ops.clear_vfta = e1000_clear_vfta_generic;
410 	/* ID LED init */
411 	mac->ops.id_led_init = e1000_id_led_init_generic;
412 	/* blink LED */
413 	mac->ops.blink_led = e1000_blink_led_generic;
414 	/* setup LED */
415 	mac->ops.setup_led = e1000_setup_led_generic;
416 	/* cleanup LED */
417 	mac->ops.cleanup_led = e1000_cleanup_led_generic;
418 	/* turn on/off LED */
419 	mac->ops.led_on = e1000_led_on_generic;
420 	mac->ops.led_off = e1000_led_off_generic;
421 	/* clear hardware counters */
422 	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575;
423 	/* link info */
424 	mac->ops.get_link_up_info = e1000_get_link_up_info_82575;
425 
426 	/* set lan id for port to determine which phy lock to use */
427 	hw->mac.ops.set_lan_id(hw);
428 
429 	return E1000_SUCCESS;
430 }
431 
432 /**
433  *  e1000_init_function_pointers_82575 - Init func ptrs.
434  *  @hw: pointer to the HW structure
435  *
436  *  Called to initialize all function pointers and parameters.
437  **/
438 void e1000_init_function_pointers_82575(struct e1000_hw *hw)
439 {
440 	DEBUGFUNC("e1000_init_function_pointers_82575");
441 
442 	hw->mac.ops.init_params = e1000_init_mac_params_82575;
443 	hw->nvm.ops.init_params = e1000_init_nvm_params_82575;
444 	hw->phy.ops.init_params = e1000_init_phy_params_82575;
445 	hw->mbx.ops.init_params = e1000_init_mbx_params_pf;
446 }
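
/*
 * A minimal sketch of how these hooks are consumed; the shared code's
 * e1000_setup_init_funcs() follows roughly this pattern:
 *
 *	e1000_init_function_pointers_82575(hw);
 *	hw->mac.ops.init_params(hw);
 *	hw->nvm.ops.init_params(hw);
 *	hw->phy.ops.init_params(hw);
 *	hw->mbx.ops.init_params(hw);
 */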
447 
448 /**
449  *  e1000_acquire_phy_82575 - Acquire rights to access PHY
450  *  @hw: pointer to the HW structure
451  *
452  *  Acquire access rights to the correct PHY.
453  **/
454 static s32 e1000_acquire_phy_82575(struct e1000_hw *hw)
455 {
456 	u16 mask = E1000_SWFW_PHY0_SM;
457 
458 	DEBUGFUNC("e1000_acquire_phy_82575");
459 
460 	if (hw->bus.func == E1000_FUNC_1)
461 		mask = E1000_SWFW_PHY1_SM;
462 	else if (hw->bus.func == E1000_FUNC_2)
463 		mask = E1000_SWFW_PHY2_SM;
464 	else if (hw->bus.func == E1000_FUNC_3)
465 		mask = E1000_SWFW_PHY3_SM;
466 
467 	return e1000_acquire_swfw_sync_82575(hw, mask);
468 }
469 
470 /**
471  *  e1000_release_phy_82575 - Release rights to access PHY
472  *  @hw: pointer to the HW structure
473  *
474  *  A wrapper to release access rights to the correct PHY.
475  **/
476 static void e1000_release_phy_82575(struct e1000_hw *hw)
477 {
478 	u16 mask = E1000_SWFW_PHY0_SM;
479 
480 	DEBUGFUNC("e1000_release_phy_82575");
481 
482 	if (hw->bus.func == E1000_FUNC_1)
483 		mask = E1000_SWFW_PHY1_SM;
484 	else if (hw->bus.func == E1000_FUNC_2)
485 		mask = E1000_SWFW_PHY2_SM;
486 	else if (hw->bus.func == E1000_FUNC_3)
487 		mask = E1000_SWFW_PHY3_SM;
488 
489 	e1000_release_swfw_sync_82575(hw, mask);
490 }
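
/*
 * A minimal sketch of the expected acquire/release pairing, mirroring the
 * pattern used by the SGMII register accessors below:
 *
 *	if (hw->phy.ops.acquire(hw) == E1000_SUCCESS) {
 *		... access the PHY ...
 *		hw->phy.ops.release(hw);
 *	}
 */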
491 
492 /**
493  *  e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
494  *  @hw: pointer to the HW structure
495  *  @offset: register offset to be read
496  *  @data: pointer to the read data
497  *
498  *  Reads the PHY register at offset using the serial gigabit media independent
499  *  interface and stores the retrieved information in data.
500  **/
501 static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
502                                           u16 *data)
503 {
504 	s32 ret_val = -E1000_ERR_PARAM;
505 
506 	DEBUGFUNC("e1000_read_phy_reg_sgmii_82575");
507 
508 	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
509 		DEBUGOUT1("PHY Address %u is out of range\n", offset);
510 		goto out;
511 	}
512 
513 	ret_val = hw->phy.ops.acquire(hw);
514 	if (ret_val)
515 		goto out;
516 
517 	ret_val = e1000_read_phy_reg_i2c(hw, offset, data);
518 
519 	hw->phy.ops.release(hw);
520 
521 out:
522 	return ret_val;
523 }
524 
525 /**
526  *  e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
527  *  @hw: pointer to the HW structure
528  *  @offset: register offset to write to
529  *  @data: data to write at register offset
530  *
531  *  Writes the data to PHY register at the offset using the serial gigabit
532  *  media independent interface.
533  **/
534 static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
535                                            u16 data)
536 {
537 	s32 ret_val = -E1000_ERR_PARAM;
538 
539 	DEBUGFUNC("e1000_write_phy_reg_sgmii_82575");
540 
541 	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
542 		DEBUGOUT1("PHY Address %u is out of range\n", offset);
543 		goto out;
544 	}
545 
546 	ret_val = hw->phy.ops.acquire(hw);
547 	if (ret_val)
548 		goto out;
549 
550 	ret_val = e1000_write_phy_reg_i2c(hw, offset, data);
551 
552 	hw->phy.ops.release(hw);
553 
554 out:
555 	return ret_val;
556 }
557 
558 /**
559  *  e1000_get_phy_id_82575 - Retrieve PHY addr and id
560  *  @hw: pointer to the HW structure
561  *
562  *  Retrieves the PHY address and ID for both PHYs that do and do not use the
563  *  sgmii interface.
564  **/
565 static s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
566 {
567 	struct e1000_phy_info *phy = &hw->phy;
568 	s32  ret_val = E1000_SUCCESS;
569 	u16 phy_id;
570 	u32 ctrl_ext;
571 	u32 mdic;
572 
573 	DEBUGFUNC("e1000_get_phy_id_82575");
574 
575 	/*
576 	 * For SGMII PHYs, we try the list of possible addresses until
577 	 * we find one that works.  For non-SGMII PHYs
578 	 * (e.g. integrated copper PHYs), an address of 1 should
579  *  work.  The result of this function should be that phy->addr
580  *  and phy->id are set correctly.
581 	 */
582 	if (!e1000_sgmii_active_82575(hw)) {
583 		phy->addr = 1;
584 		ret_val = e1000_get_phy_id(hw);
585 		goto out;
586 	}
587 
588 	if (e1000_sgmii_uses_mdio_82575(hw)) {
589 		switch (hw->mac.type) {
590 		case e1000_82575:
591 		case e1000_82576:
592 			mdic = E1000_READ_REG(hw, E1000_MDIC);
593 			mdic &= E1000_MDIC_PHY_MASK;
594 			phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
595 			break;
596 		case e1000_82580:
597 		case e1000_i350:
598 			mdic = E1000_READ_REG(hw, E1000_MDICNFG);
599 			mdic &= E1000_MDICNFG_PHY_MASK;
600 			phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
601 			break;
602 		default:
603 			ret_val = -E1000_ERR_PHY;
604 			goto out;
605 			break;
606 		}
607 		ret_val = e1000_get_phy_id(hw);
608 		goto out;
609 	}
610 
611 	/* Power on sgmii phy if it is disabled */
612 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
613 	E1000_WRITE_REG(hw, E1000_CTRL_EXT,
614 	                ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
615 	E1000_WRITE_FLUSH(hw);
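	/* Give the SFP module time to power up before probing PHY addresses. */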
616 	msec_delay(300);
617 
618 	/*
619 	 * The address field in the I2CCMD register is 3 bits and 0 is invalid.
620 	 * Therefore, we need to test addresses 1-7.
621 	 */
622 	for (phy->addr = 1; phy->addr < 8; phy->addr++) {
623 		ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
624 		if (ret_val == E1000_SUCCESS) {
625 			DEBUGOUT2("Vendor ID 0x%08X read at address %u\n",
626 			          phy_id,
627 			          phy->addr);
628 			/*
629 			 * At the time of this writing, the M88 part is
630 			 * the only supported SGMII PHY product.
631 			 */
632 			if (phy_id == M88_VENDOR)
633 				break;
634 		} else {
635 			DEBUGOUT1("PHY address %u was unreadable\n",
636 			          phy->addr);
637 		}
638 	}
639 
640 	/* A valid PHY type couldn't be found. */
641 	if (phy->addr == 8) {
642 		phy->addr = 0;
643 		ret_val = -E1000_ERR_PHY;
644 	} else {
645 		ret_val = e1000_get_phy_id(hw);
646 	}
647 
648 	/* restore previous sfp cage power state */
649 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
650 
651 out:
652 	return ret_val;
653 }
654 
655 /**
656  *  e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset
657  *  @hw: pointer to the HW structure
658  *
659  *  Resets the PHY using the serial gigabit media independent interface.
660  **/
661 static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
662 {
663 	s32 ret_val = E1000_SUCCESS;
664 
665 	DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575");
666 
667 	/*
668 	 * This isn't a true "hard" reset, but it is the only reset
669 	 * available to us at this time.
670 	 */
671 
672 	DEBUGOUT("Soft resetting SGMII attached PHY...\n");
673 
674 	if (!(hw->phy.ops.write_reg))
675 		goto out;
676 
677 	/*
678 	 * SFP documentation requires the following to configure the SFP module
679 	 * to work on SGMII.  No further documentation is given.
680 	 */
681 	ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
682 	if (ret_val)
683 		goto out;
684 
685 	ret_val = hw->phy.ops.commit(hw);
686 
687 out:
688 	return ret_val;
689 }
690 
691 /**
692  *  e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
693  *  @hw: pointer to the HW structure
694  *  @active: TRUE to enable LPLU, FALSE to disable
695  *
696  *  Sets the LPLU D0 state according to the active flag.  When
697  *  activating LPLU this function also disables smart speed
698  *  and vice versa.  LPLU will not be activated unless the
699  *  device autonegotiation advertisement meets standards of
700  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
701  *  This is a function pointer entry point only called by
702  *  PHY setup routines.
703  **/
704 static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
705 {
706 	struct e1000_phy_info *phy = &hw->phy;
707 	s32 ret_val = E1000_SUCCESS;
708 	u16 data;
709 
710 	DEBUGFUNC("e1000_set_d0_lplu_state_82575");
711 
712 	if (!(hw->phy.ops.read_reg))
713 		goto out;
714 
715 	ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
716 	if (ret_val)
717 		goto out;
718 
719 	if (active) {
720 		data |= IGP02E1000_PM_D0_LPLU;
721 		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
722 		                             data);
723 		if (ret_val)
724 			goto out;
725 
726 		/* When LPLU is enabled, we should disable SmartSpeed */
727 		ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
728 		                            &data);
729 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
730 		ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
731 		                             data);
732 		if (ret_val)
733 			goto out;
734 	} else {
735 		data &= ~IGP02E1000_PM_D0_LPLU;
736 		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
737 		                             data);
738 		/*
739 		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
740 		 * during Dx states where the power conservation is most
741 		 * important.  During driver activity we should enable
742 		 * SmartSpeed, so performance is maintained.
743 		 */
744 		if (phy->smart_speed == e1000_smart_speed_on) {
745 			ret_val = phy->ops.read_reg(hw,
746 			                            IGP01E1000_PHY_PORT_CONFIG,
747 			                            &data);
748 			if (ret_val)
749 				goto out;
750 
751 			data |= IGP01E1000_PSCFR_SMART_SPEED;
752 			ret_val = phy->ops.write_reg(hw,
753 			                             IGP01E1000_PHY_PORT_CONFIG,
754 			                             data);
755 			if (ret_val)
756 				goto out;
757 		} else if (phy->smart_speed == e1000_smart_speed_off) {
758 			ret_val = phy->ops.read_reg(hw,
759 			                            IGP01E1000_PHY_PORT_CONFIG,
760 			                            &data);
761 			if (ret_val)
762 				goto out;
763 
764 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
765 			ret_val = phy->ops.write_reg(hw,
766 			                             IGP01E1000_PHY_PORT_CONFIG,
767 			                             data);
768 			if (ret_val)
769 				goto out;
770 		}
771 	}
772 
773 out:
774 	return ret_val;
775 }
776 
777 /**
778  *  e1000_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
779  *  @hw: pointer to the HW structure
780  *  @active: TRUE to enable LPLU, FALSE to disable
781  *
782  *  Sets the LPLU D0 state according to the active flag.  When
783  *  activating LPLU this function also disables smart speed
784  *  and vice versa.  LPLU will not be activated unless the
785  *  device autonegotiation advertisement meets standards of
786  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
787  *  This is a function pointer entry point only called by
788  *  PHY setup routines.
789  **/
790 static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
791 {
792 	struct e1000_phy_info *phy = &hw->phy;
793 	s32 ret_val = E1000_SUCCESS;
794 	u16 data;
795 
796 	DEBUGFUNC("e1000_set_d0_lplu_state_82580");
797 
798 	data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
799 
800 	if (active) {
801 		data |= E1000_82580_PM_D0_LPLU;
802 
803 		/* When LPLU is enabled, we should disable SmartSpeed */
804 		data &= ~E1000_82580_PM_SPD;
805 	} else {
806 		data &= ~E1000_82580_PM_D0_LPLU;
807 
808 		/*
809 		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
810 		 * during Dx states where the power conservation is most
811 		 * important.  During driver activity we should enable
812 		 * SmartSpeed, so performance is maintained.
813 		 */
814 		if (phy->smart_speed == e1000_smart_speed_on) {
815 			data |= E1000_82580_PM_SPD;
816 		} else if (phy->smart_speed == e1000_smart_speed_off) {
817 			data &= ~E1000_82580_PM_SPD;
818 		}
819 	}
820 
821 	E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
822 	return ret_val;
823 }
824 
825 /**
826  *  e1000_set_d3_lplu_state_82580 - Sets low power link up state for D3
827  *  @hw: pointer to the HW structure
828  *  @active: boolean used to enable/disable lplu
829  *
830  *  Success returns 0, Failure returns 1
831  *
832  *  The low power link up (lplu) state is set to the power management level D3
833  *  and SmartSpeed is disabled when active is TRUE, else clear lplu for D3
834  *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
835  *  is used during Dx states where the power conservation is most important.
836  *  During driver activity, SmartSpeed should be enabled so performance is
837  *  maintained.
838  **/
839 s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
840 {
841 	struct e1000_phy_info *phy = &hw->phy;
842 	s32 ret_val = E1000_SUCCESS;
843 	u16 data;
844 
845 	DEBUGFUNC("e1000_set_d3_lplu_state_82580");
846 
847 	data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
848 
849 	if (!active) {
850 		data &= ~E1000_82580_PM_D3_LPLU;
851 		/*
852 		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
853 		 * during Dx states where the power conservation is most
854 		 * important.  During driver activity we should enable
855 		 * SmartSpeed, so performance is maintained.
856 		 */
857 		if (phy->smart_speed == e1000_smart_speed_on) {
858 			data |= E1000_82580_PM_SPD;
859 		} else if (phy->smart_speed == e1000_smart_speed_off) {
860 			data &= ~E1000_82580_PM_SPD;
861 		}
862 	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
863 	           (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
864 	           (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
865 		data |= E1000_82580_PM_D3_LPLU;
866 		/* When LPLU is enabled, we should disable SmartSpeed */
867 		data &= ~E1000_82580_PM_SPD;
868 	}
869 
870 	E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
871 	return ret_val;
872 }
873 
874 /**
875  *  e1000_acquire_nvm_82575 - Request for access to EEPROM
876  *  @hw: pointer to the HW structure
877  *
878  *  Acquire the necessary semaphores for exclusive access to the EEPROM.
879  *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
880  *  Return successful if access grant bit set, else clear the request for
881  *  EEPROM access and return -E1000_ERR_NVM (-1).
882  **/
883 static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
884 {
885 	s32 ret_val;
886 
887 	DEBUGFUNC("e1000_acquire_nvm_82575");
888 
889 	ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
890 	if (ret_val)
891 		goto out;
892 	ret_val = e1000_acquire_nvm_generic(hw);
893 	if (ret_val)
894 		e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
895 
896 out:
897 	return ret_val;
898 }
899 
900 /**
901  *  e1000_release_nvm_82575 - Release exclusive access to EEPROM
902  *  @hw: pointer to the HW structure
903  *
904  *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
905  *  then release the semaphores acquired.
906  **/
907 static void e1000_release_nvm_82575(struct e1000_hw *hw)
908 {
909 	DEBUGFUNC("e1000_release_nvm_82575");
910 
911 	e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
912 }
913 
914 /**
915  *  e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
916  *  @hw: pointer to the HW structure
917  *  @mask: specifies which semaphore to acquire
918  *
919  *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
920  *  will also specify which port we're acquiring the lock for.
921  **/
922 static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
923 {
924 	u32 swfw_sync;
925 	u32 swmask = mask;
926 	u32 fwmask = mask << 16;
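	/*
	 * Software ownership flags live in the low 16 bits of SW_FW_SYNC;
	 * the matching firmware flags are the same mask shifted into the
	 * upper 16 bits.
	 */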
927 	s32 ret_val = E1000_SUCCESS;
928 	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
929 
930 	DEBUGFUNC("e1000_acquire_swfw_sync_82575");
931 
932 	while (i < timeout) {
933 		if (e1000_get_hw_semaphore_generic(hw)) {
934 			ret_val = -E1000_ERR_SWFW_SYNC;
935 			goto out;
936 		}
937 
938 		swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
939 		if (!(swfw_sync & (fwmask | swmask)))
940 			break;
941 
942 		/*
943 		 * Firmware currently using resource (fwmask)
944 		 * or other software thread using resource (swmask)
945 		 */
946 		e1000_put_hw_semaphore_generic(hw);
947 		msec_delay_irq(5);
948 		i++;
949 	}
950 
951 	if (i == timeout) {
952 		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
953 		ret_val = -E1000_ERR_SWFW_SYNC;
954 		goto out;
955 	}
956 
957 	swfw_sync |= swmask;
958 	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
959 
960 	e1000_put_hw_semaphore_generic(hw);
961 
962 out:
963 	return ret_val;
964 }
965 
966 /**
967  *  e1000_release_swfw_sync_82575 - Release SW/FW semaphore
968  *  @hw: pointer to the HW structure
969  *  @mask: specifies which semaphore to acquire
970  *
971  *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
972  *  will also specify which port we're releasing the lock for.
973  **/
974 static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
975 {
976 	u32 swfw_sync;
977 
978 	DEBUGFUNC("e1000_release_swfw_sync_82575");
979 
980 	while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS);
981 	/* Empty */
982 
983 	swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
984 	swfw_sync &= ~mask;
985 	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
986 
987 	e1000_put_hw_semaphore_generic(hw);
988 }
989 
990 /**
991  *  e1000_get_cfg_done_82575 - Read config done bit
992  *  @hw: pointer to the HW structure
993  *
994  *  Read the management control register for the config done bit for
995  *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
996  *  to read the config done bit, so an error is *ONLY* logged and returns
997  *  E1000_SUCCESS.  If we were to return with error, EEPROM-less silicon
998  *  would not be able to be reset or change link.
999  **/
1000 static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
1001 {
1002 	s32 timeout = PHY_CFG_TIMEOUT;
1003 	s32 ret_val = E1000_SUCCESS;
1004 	u32 mask = E1000_NVM_CFG_DONE_PORT_0;
1005 
1006 	DEBUGFUNC("e1000_get_cfg_done_82575");
1007 
1008 	if (hw->bus.func == E1000_FUNC_1)
1009 		mask = E1000_NVM_CFG_DONE_PORT_1;
1010 	else if (hw->bus.func == E1000_FUNC_2)
1011 		mask = E1000_NVM_CFG_DONE_PORT_2;
1012 	else if (hw->bus.func == E1000_FUNC_3)
1013 		mask = E1000_NVM_CFG_DONE_PORT_3;
1014 	while (timeout) {
1015 		if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
1016 			break;
1017 		msec_delay(1);
1018 		timeout--;
1019 	}
1020 	if (!timeout)
1021 		DEBUGOUT("MNG configuration cycle has not completed.\n");
1022 
1023 	/* If EEPROM is not marked present, init the PHY manually */
1024 	if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
1025 	    (hw->phy.type == e1000_phy_igp_3))
1026 		e1000_phy_init_script_igp3(hw);
1027 
1028 	return ret_val;
1029 }
1030 
1031 /**
1032  *  e1000_get_link_up_info_82575 - Get link speed/duplex info
1033  *  @hw: pointer to the HW structure
1034  *  @speed: stores the current speed
1035  *  @duplex: stores the current duplex
1036  *
1037  *  This is a wrapper function, if using the serial gigabit media independent
1038  *  interface, use PCS to retrieve the link speed and duplex information.
1039  *  Otherwise, use the generic function to get the link speed and duplex info.
1040  **/
1041 static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
1042                                         u16 *duplex)
1043 {
1044 	s32 ret_val;
1045 
1046 	DEBUGFUNC("e1000_get_link_up_info_82575");
1047 
1048 	if (hw->phy.media_type != e1000_media_type_copper)
1049 		ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed,
1050 		                                               duplex);
1051 	else
1052 		ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
1053 		                                                    duplex);
1054 
1055 	return ret_val;
1056 }
1057 
1058 /**
1059  *  e1000_check_for_link_82575 - Check for link
1060  *  @hw: pointer to the HW structure
1061  *
1062  *  If sgmii is enabled, then use the pcs register to determine link, otherwise
1063  *  use the generic interface for determining link.
1064  **/
1065 static s32 e1000_check_for_link_82575(struct e1000_hw *hw)
1066 {
1067 	s32 ret_val;
1068 	u16 speed, duplex;
1069 
1070 	DEBUGFUNC("e1000_check_for_link_82575");
1071 
1072 	if (hw->phy.media_type != e1000_media_type_copper) {
1073 		ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed,
1074 		                                               &duplex);
1075 		/*
1076 		 * Use this flag to determine if link needs to be checked or
1077 		 * not.  If we have link clear the flag so that we do not
1078 		 * continue to check for link.
1079 		 */
1080 		hw->mac.get_link_status = !hw->mac.serdes_has_link;
1081 	} else {
1082 		ret_val = e1000_check_for_copper_link_generic(hw);
1083 	}
1084 
1085 	return ret_val;
1086 }
1087 
1088 /**
1089  *  e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown
1090  *  @hw: pointer to the HW structure
1091  **/
1092 static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw)
1093 {
1094 	u32 reg;
1095 
1096 	DEBUGFUNC("e1000_power_up_serdes_link_82575");
1097 
1098 	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1099 	    !e1000_sgmii_active_82575(hw))
1100 		return;
1101 
1102 	/* Enable PCS to turn on link */
1103 	reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
1104 	reg |= E1000_PCS_CFG_PCS_EN;
1105 	E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
1106 
1107 	/* Power up the laser */
1108 	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1109 	reg &= ~E1000_CTRL_EXT_SDP3_DATA;
1110 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
1111 
1112 	/* flush the write to verify completion */
1113 	E1000_WRITE_FLUSH(hw);
1114 	msec_delay(1);
1115 }
1116 
1117 /**
1118  *  e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
1119  *  @hw: pointer to the HW structure
1120  *  @speed: stores the current speed
1121  *  @duplex: stores the current duplex
1122  *
1123  *  Using the physical coding sub-layer (PCS), retrieve the current speed and
1124  *  duplex, then store the values in the pointers provided.
1125  **/
1126 static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
1127                                                 u16 *speed, u16 *duplex)
1128 {
1129 	struct e1000_mac_info *mac = &hw->mac;
1130 	u32 pcs;
1131 
1132 	DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575");
1133 
1134 	/* Set up defaults for the return values of this function */
1135 	mac->serdes_has_link = FALSE;
1136 	*speed = 0;
1137 	*duplex = 0;
1138 
1139 	/*
1140 	 * Read the PCS Status register for link state. For non-copper mode,
1141 	 * the MAC STATUS register is not accurate, so the PCS status
1142 	 * register is used instead.
1143 	 */
1144 	pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT);
1145 
1146 	/*
1147 	 * The link up bit determines when link is up on autoneg. The sync ok
1148 	 * gets set once both sides sync up and agree upon link. Stable link
1149 	 * can be determined by checking for both link up and link sync ok
1150 	 * can be determined by checking for both link up and link sync ok.
1151 	if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
1152 		mac->serdes_has_link = TRUE;
1153 
1154 		/* Detect and store PCS speed */
1155 		if (pcs & E1000_PCS_LSTS_SPEED_1000) {
1156 			*speed = SPEED_1000;
1157 		} else if (pcs & E1000_PCS_LSTS_SPEED_100) {
1158 			*speed = SPEED_100;
1159 		} else {
1160 			*speed = SPEED_10;
1161 		}
1162 
1163 		/* Detect and store PCS duplex */
1164 		if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
1165 			*duplex = FULL_DUPLEX;
1166 		} else {
1167 			*duplex = HALF_DUPLEX;
1168 		}
1169 	}
1170 
1171 	return E1000_SUCCESS;
1172 }
1173 
1174 /**
1175  *  e1000_shutdown_serdes_link_82575 - Remove link during power down
1176  *  @hw: pointer to the HW structure
1177  *
1178  *  On serdes links, shut down the sfp and PCS on driver unload
1179  *  when management pass thru is not enabled.
1180  **/
1181 void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw)
1182 {
1183 	u32 reg;
1184 
1185 	DEBUGFUNC("e1000_shutdown_serdes_link_82575");
1186 
1187 	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1188 	    !e1000_sgmii_active_82575(hw))
1189 		return;
1190 
1191 	if (!e1000_enable_mng_pass_thru(hw)) {
1192 		/* Disable PCS to turn off link */
1193 		reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
1194 		reg &= ~E1000_PCS_CFG_PCS_EN;
1195 		E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
1196 
1197 		/* shutdown the laser */
1198 		reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1199 		reg |= E1000_CTRL_EXT_SDP3_DATA;
1200 		E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
1201 
1202 		/* flush the write to verify completion */
1203 		E1000_WRITE_FLUSH(hw);
1204 		msec_delay(1);
1205 	}
1206 
1207 	return;
1208 }
1209 
1210 /**
1211  *  e1000_reset_hw_82575 - Reset hardware
1212  *  @hw: pointer to the HW structure
1213  *
1214  *  This resets the hardware into a known state.
1215  **/
1216 static s32 e1000_reset_hw_82575(struct e1000_hw *hw)
1217 {
1218 	u32 ctrl;
1219 	s32 ret_val;
1220 
1221 	DEBUGFUNC("e1000_reset_hw_82575");
1222 
1223 	/*
1224 	 * Prevent the PCI-E bus from sticking if there is no TLP connection
1225 	 * on the last TLP read/write transaction when MAC is reset.
1226 	 */
1227 	ret_val = e1000_disable_pcie_master_generic(hw);
1228 	if (ret_val) {
1229 		DEBUGOUT("PCI-E Master disable polling has failed.\n");
1230 	}
1231 
1232 	/* set the completion timeout for interface */
1233 	ret_val = e1000_set_pcie_completion_timeout(hw);
1234 	if (ret_val) {
1235 		DEBUGOUT("PCI-E Set completion timeout has failed.\n");
1236 	}
1237 
1238 	DEBUGOUT("Masking off all interrupts\n");
1239 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
1240 
1241 	E1000_WRITE_REG(hw, E1000_RCTL, 0);
1242 	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
1243 	E1000_WRITE_FLUSH(hw);
1244 
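	/* Give any in-flight receive and transmit traffic time to drain. */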
1245 	msec_delay(10);
1246 
1247 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
1248 
1249 	DEBUGOUT("Issuing a global reset to MAC\n");
1250 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
1251 
1252 	ret_val = e1000_get_auto_rd_done_generic(hw);
1253 	if (ret_val) {
1254 		/*
1255 		 * When auto config read does not complete, do not
1256 		 * return with an error. This can happen in situations
1257 		 * where there is no eeprom and prevents getting link.
1258 		 */
1259 		DEBUGOUT("Auto Read Done did not complete\n");
1260 	}
1261 
1262 	/* If EEPROM is not present, run manual init scripts */
1263 	if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
1264 		e1000_reset_init_script_82575(hw);
1265 
1266 	/* Clear any pending interrupt events. */
1267 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
1268 	E1000_READ_REG(hw, E1000_ICR);
1269 
1270 	/* Install any alternate MAC address into RAR0 */
1271 	ret_val = e1000_check_alt_mac_addr_generic(hw);
1272 
1273 	return ret_val;
1274 }
1275 
1276 /**
1277  *  e1000_init_hw_82575 - Initialize hardware
1278  *  @hw: pointer to the HW structure
1279  *
1280  *  This inits the hardware readying it for operation.
1281  **/
1282 static s32 e1000_init_hw_82575(struct e1000_hw *hw)
1283 {
1284 	struct e1000_mac_info *mac = &hw->mac;
1285 	s32 ret_val;
1286 	u16 i, rar_count = mac->rar_entry_count;
1287 
1288 	DEBUGFUNC("e1000_init_hw_82575");
1289 
1290 	/* Initialize identification LED */
1291 	ret_val = mac->ops.id_led_init(hw);
1292 	if (ret_val) {
1293 		DEBUGOUT("Error initializing identification LED\n");
1294 		/* This is not fatal and we should not stop init due to this */
1295 	}
1296 
1297 	/* Disabling VLAN filtering */
1298 	DEBUGOUT("Initializing the IEEE VLAN\n");
1299 	mac->ops.clear_vfta(hw);
1300 
1301 	/* Setup the receive address */
1302 	e1000_init_rx_addrs_generic(hw, rar_count);
1303 
1304 	/* Zero out the Multicast HASH table */
1305 	DEBUGOUT("Zeroing the MTA\n");
1306 	for (i = 0; i < mac->mta_reg_count; i++)
1307 		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
1308 
1309 	/* Zero out the Unicast HASH table */
1310 	DEBUGOUT("Zeroing the UTA\n");
1311 	for (i = 0; i < mac->uta_reg_count; i++)
1312 		E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0);
1313 
1314 	/* Setup link and flow control */
1315 	ret_val = mac->ops.setup_link(hw);
1316 
1317 	/*
1318 	 * Clear all of the statistics registers (clear on read).  It is
1319 	 * important that we do this after we have tried to establish link
1320 	 * because the symbol error count will increment wildly if there
1321 	 * is no link.
1322 	 */
1323 	e1000_clear_hw_cntrs_82575(hw);
1324 
1325 	return ret_val;
1326 }
1327 
1328 /**
1329  *  e1000_setup_copper_link_82575 - Configure copper link settings
1330  *  @hw: pointer to the HW structure
1331  *
1332  *  Configures the link for auto-neg or forced speed and duplex.  Then we check
1333  *  for link, once link is established calls to configure collision distance
1334  *  and flow control are called.
1335  **/
1336 static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
1337 {
1338 	u32 ctrl;
1339 	s32  ret_val;
1340 
1341 	DEBUGFUNC("e1000_setup_copper_link_82575");
1342 
1343 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
1344 	ctrl |= E1000_CTRL_SLU;
1345 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1346 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
1347 
1348 	ret_val = e1000_setup_serdes_link_82575(hw);
1349 	if (ret_val)
1350 		goto out;
1351 
1352 	if (e1000_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
1353 		/* allow time for SFP cage to power up phy */
1354 		msec_delay(300);
1355 
1356 		ret_val = hw->phy.ops.reset(hw);
1357 		if (ret_val) {
1358 			DEBUGOUT("Error resetting the PHY.\n");
1359 			goto out;
1360 		}
1361 	}
1362 	switch (hw->phy.type) {
1363 	case e1000_phy_m88:
1364 		if (hw->phy.id == I347AT4_E_PHY_ID ||
1365 		    hw->phy.id == M88E1112_E_PHY_ID ||
1366 		    hw->phy.id == M88E1340M_E_PHY_ID)
1367 			ret_val = e1000_copper_link_setup_m88_gen2(hw);
1368 		else
1369 			ret_val = e1000_copper_link_setup_m88(hw);
1370 		break;
1371 	case e1000_phy_igp_3:
1372 		ret_val = e1000_copper_link_setup_igp(hw);
1373 		break;
1374 	case e1000_phy_82580:
1375 		ret_val = e1000_copper_link_setup_82577(hw);
1376 		break;
1377 	default:
1378 		ret_val = -E1000_ERR_PHY;
1379 		break;
1380 	}
1381 
1382 	if (ret_val)
1383 		goto out;
1384 
1385 	ret_val = e1000_setup_copper_link_generic(hw);
1386 out:
1387 	return ret_val;
1388 }
1389 
1390 /**
1391  *  e1000_setup_serdes_link_82575 - Setup link for serdes
1392  *  @hw: pointer to the HW structure
1393  *
1394  *  Configure the physical coding sub-layer (PCS) link.  The PCS link is
1395  *  used on copper connections where the serialized gigabit media independent
1396  *  interface (sgmii), or serdes fiber is being used.  Configures the link
1397  *  for auto-negotiation or forces speed/duplex.
1398  **/
1399 static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
1400 {
1401 	u32 ctrl_ext, ctrl_reg, reg;
1402 	bool pcs_autoneg;
1403 
1404 	DEBUGFUNC("e1000_setup_serdes_link_82575");
1405 
1406 	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1407 	    !e1000_sgmii_active_82575(hw))
1408 		return E1000_SUCCESS;
1409 
1410 	/*
1411 	 * On the 82575, SerDes loopback mode persists until it is
1412 	 * explicitly turned off or a power cycle is performed.  A read to
1413 	 * the register does not indicate its status.  Therefore, we ensure
1414 	 * loopback mode is disabled during initialization.
1415 	 */
1416 	E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1417 
1418 	/* power on the sfp cage if present */
1419 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1420 	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
1421 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1422 
1423 	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1424 	ctrl_reg |= E1000_CTRL_SLU;
1425 
1426 	/* set both sw defined pins on 82575/82576 */
1427 	if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576)
1428 		ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
1429 
1430 	reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
1431 
1432 	/* default pcs_autoneg to the same setting as mac autoneg */
1433 	pcs_autoneg = hw->mac.autoneg;
1434 
1435 	switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
1436 	case E1000_CTRL_EXT_LINK_MODE_SGMII:
1437 		/* sgmii mode lets the phy handle forcing speed/duplex */
1438 		pcs_autoneg = TRUE;
1439 		/* autoneg time out should be disabled for SGMII mode */
1440 		reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
1441 		break;
1442 	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
1443 		/* disable PCS autoneg and support parallel detect only */
1444 		pcs_autoneg = FALSE;
1445 		/* fall through to default case */
1446 	default:
1447 		/*
1448 		 * non-SGMII modes only support a speed of 1000/Full for the
1449 		 * link, so it is best to just force the MAC and let the pcs
1450 		 * link either autoneg or be forced to 1000/Full.
1451 		 */
1452 		ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
1453 		            E1000_CTRL_FD | E1000_CTRL_FRCDPX;
1454 
1455 		/* set speed of 1000/Full if speed/duplex is forced */
1456 		reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
1457 		break;
1458 	}
1459 
1460 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1461 
1462 	/*
1463 	 * New SerDes mode allows for forcing speed or autonegotiating speed
1464 	 * at 1gb. Autoneg should be the default for most drivers. This is the
1465 	 * mode that will be compatible with older link partners and switches.
1466 	 * However, both are supported by the hardware and some drivers/tools.
1467 	 */
1468 	reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
1469 	         E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
1470 
1471 	/*
1472 	 * We force flow control to prevent the CTRL register values from being
1473 	 * overwritten by the autonegotiated flow control values
1474 	 */
1475 	reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1476 
1477 	if (pcs_autoneg) {
1478 		/* Set PCS register for autoneg */
1479 		reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
1480 		       E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
1481 		DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
1482 	} else {
1483 		/* Set PCS register for forced link */
1484 		reg |= E1000_PCS_LCTL_FSD;        /* Force Speed */
1485 		DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
1486 	}
1487 
1488 	E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
1489 
1490 	if (!e1000_sgmii_active_82575(hw))
1491 		e1000_force_mac_fc_generic(hw);
1492 
1493 	return E1000_SUCCESS;
1494 }
1495 
1496 /**
1497  *  e1000_valid_led_default_82575 - Verify a valid default LED config
1498  *  @hw: pointer to the HW structure
1499  *  @data: pointer to the NVM (EEPROM)
1500  *
1501  *  Read the EEPROM for the current default LED configuration.  If the
1502  *  LED configuration is not valid, set to a valid LED configuration.
1503  **/
1504 static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data)
1505 {
1506 	s32 ret_val;
1507 
1508 	DEBUGFUNC("e1000_valid_led_default_82575");
1509 
1510 	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1511 	if (ret_val) {
1512 		DEBUGOUT("NVM Read Error\n");
1513 		goto out;
1514 	}
1515 
1516 	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
1517 		switch (hw->phy.media_type) {
1518 		case e1000_media_type_internal_serdes:
1519 			*data = ID_LED_DEFAULT_82575_SERDES;
1520 			break;
1521 		case e1000_media_type_copper:
1522 		default:
1523 			*data = ID_LED_DEFAULT;
1524 			break;
1525 		}
1526 	}
1527 out:
1528 	return ret_val;
1529 }
1530 
1531 /**
1532  *  e1000_sgmii_active_82575 - Return sgmii state
1533  *  @hw: pointer to the HW structure
1534  *
1535  *  82575 silicon has a serialized gigabit media independent interface (sgmii)
1536  *  which can be enabled for use in embedded applications.  Simply
1537  *  return the current state of the sgmii interface.
1538  **/
1539 static bool e1000_sgmii_active_82575(struct e1000_hw *hw)
1540 {
1541 	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
1542 	return dev_spec->sgmii_active;
1543 }
1544 
1545 /**
1546  *  e1000_reset_init_script_82575 - Inits HW defaults after reset
1547  *  @hw: pointer to the HW structure
1548  *
1549  *  Inits recommended HW defaults after a reset when there is no EEPROM
1550  *  detected. This is only for the 82575.
1551  **/
1552 static s32 e1000_reset_init_script_82575(struct e1000_hw *hw)
1553 {
1554 	DEBUGFUNC("e1000_reset_init_script_82575");
1555 
1556 	if (hw->mac.type == e1000_82575) {
1557 		DEBUGOUT("Running reset init script for 82575\n");
1558 		/* SerDes configuration via SERDESCTRL */
1559 		e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C);
1560 		e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78);
1561 		e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23);
1562 		e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15);
1563 
1564 		/* CCM configuration via CCMCTL register */
1565 		e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00);
1566 		e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00);
1567 
1568 		/* PCIe lanes configuration */
1569 		e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC);
1570 		e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF);
1571 		e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05);
1572 		e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81);
1573 
1574 		/* PCIe PLL Configuration */
1575 		e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47);
1576 		e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00);
1577 		e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00);
1578 	}
1579 
1580 	return E1000_SUCCESS;
1581 }
1582 
1583 /**
1584  *  e1000_read_mac_addr_82575 - Read device MAC address
1585  *  @hw: pointer to the HW structure
1586  **/
1587 static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw)
1588 {
1589 	s32 ret_val = E1000_SUCCESS;
1590 
1591 	DEBUGFUNC("e1000_read_mac_addr_82575");
1592 
1593 	/*
1594 	 * If there's an alternate MAC address place it in RAR0
1595 	 * so that it will override the Si installed default perm
1596 	 * address.
1597 	 */
1598 	ret_val = e1000_check_alt_mac_addr_generic(hw);
1599 	if (ret_val)
1600 		goto out;
1601 
1602 	ret_val = e1000_read_mac_addr_generic(hw);
1603 
1604 out:
1605 	return ret_val;
1606 }
1607 
1608 /**
1609  *  e1000_config_collision_dist_82575 - Configure collision distance
1610  *  @hw: pointer to the HW structure
1611  *
1612  *  Configures the collision distance to the default value and is used
1613  *  during link setup.
1614  **/
1615 static void e1000_config_collision_dist_82575(struct e1000_hw *hw)
1616 {
1617 	u32 tctl_ext;
1618 
1619 	DEBUGFUNC("e1000_config_collision_dist_82575");
1620 
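	/*
	 * The generic implementation programs the collision distance through
	 * TCTL; on this family it lives in the COLD field of TCTL_EXT instead.
	 */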
1621 	tctl_ext = E1000_READ_REG(hw, E1000_TCTL_EXT);
1622 
1623 	tctl_ext &= ~E1000_TCTL_EXT_COLD;
1624 	tctl_ext |= E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT;
1625 
1626 	E1000_WRITE_REG(hw, E1000_TCTL_EXT, tctl_ext);
1627 	E1000_WRITE_FLUSH(hw);
1628 }
1629 
1630 /**
1631  * e1000_power_down_phy_copper_82575 - Remove link during PHY power down
1632  * @hw: pointer to the HW structure
1633  *
1634  * In the case of a PHY power down to save power, to turn off link during a
1635  * driver unload, or when wake on LAN is not enabled, remove the link.
1636  **/
1637 static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw)
1638 {
1639 	struct e1000_phy_info *phy = &hw->phy;
1640 
1641 	if (!(phy->ops.check_reset_block))
1642 		return;
1643 
1644 	/* Power down unless management pass-through is enabled or resets are blocked */
1645 	if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw)))
1646 		e1000_power_down_phy_copper(hw);
1647 
1648 	return;
1649 }
1650 
1651 /**
1652  *  e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters
1653  *  @hw: pointer to the HW structure
1654  *
1655  *  Clears the hardware counters by reading the counter registers.
1656  **/
1657 static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
1658 {
1659 	DEBUGFUNC("e1000_clear_hw_cntrs_82575");
1660 
1661 	e1000_clear_hw_cntrs_base_generic(hw);
1662 
1663 	E1000_READ_REG(hw, E1000_PRC64);
1664 	E1000_READ_REG(hw, E1000_PRC127);
1665 	E1000_READ_REG(hw, E1000_PRC255);
1666 	E1000_READ_REG(hw, E1000_PRC511);
1667 	E1000_READ_REG(hw, E1000_PRC1023);
1668 	E1000_READ_REG(hw, E1000_PRC1522);
1669 	E1000_READ_REG(hw, E1000_PTC64);
1670 	E1000_READ_REG(hw, E1000_PTC127);
1671 	E1000_READ_REG(hw, E1000_PTC255);
1672 	E1000_READ_REG(hw, E1000_PTC511);
1673 	E1000_READ_REG(hw, E1000_PTC1023);
1674 	E1000_READ_REG(hw, E1000_PTC1522);
1675 
1676 	E1000_READ_REG(hw, E1000_ALGNERRC);
1677 	E1000_READ_REG(hw, E1000_RXERRC);
1678 	E1000_READ_REG(hw, E1000_TNCRS);
1679 	E1000_READ_REG(hw, E1000_CEXTERR);
1680 	E1000_READ_REG(hw, E1000_TSCTC);
1681 	E1000_READ_REG(hw, E1000_TSCTFC);
1682 
1683 	E1000_READ_REG(hw, E1000_MGTPRC);
1684 	E1000_READ_REG(hw, E1000_MGTPDC);
1685 	E1000_READ_REG(hw, E1000_MGTPTC);
1686 
1687 	E1000_READ_REG(hw, E1000_IAC);
1688 	E1000_READ_REG(hw, E1000_ICRXOC);
1689 
1690 	E1000_READ_REG(hw, E1000_ICRXPTC);
1691 	E1000_READ_REG(hw, E1000_ICRXATC);
1692 	E1000_READ_REG(hw, E1000_ICTXPTC);
1693 	E1000_READ_REG(hw, E1000_ICTXATC);
1694 	E1000_READ_REG(hw, E1000_ICTXQEC);
1695 	E1000_READ_REG(hw, E1000_ICTXQMTC);
1696 	E1000_READ_REG(hw, E1000_ICRXDMTC);
1697 
1698 	E1000_READ_REG(hw, E1000_CBTMPC);
1699 	E1000_READ_REG(hw, E1000_HTDPMC);
1700 	E1000_READ_REG(hw, E1000_CBRMPC);
1701 	E1000_READ_REG(hw, E1000_RPTHC);
1702 	E1000_READ_REG(hw, E1000_HGPTC);
1703 	E1000_READ_REG(hw, E1000_HTCBDPC);
1704 	E1000_READ_REG(hw, E1000_HGORCL);
1705 	E1000_READ_REG(hw, E1000_HGORCH);
1706 	E1000_READ_REG(hw, E1000_HGOTCL);
1707 	E1000_READ_REG(hw, E1000_HGOTCH);
1708 	E1000_READ_REG(hw, E1000_LENERRS);
1709 
1710 	/* This register should not be read in copper configurations */
1711 	if ((hw->phy.media_type == e1000_media_type_internal_serdes) ||
1712 	    e1000_sgmii_active_82575(hw))
1713 		E1000_READ_REG(hw, E1000_SCVPC);
1714 }
1715 
1716 /**
1717  *  e1000_rx_fifo_flush_82575 - Clean rx fifo after Rx enable
1718  *  @hw: pointer to the HW structure
1719  *
1720  *  After Rx enable, if manageability is enabled then there is likely some
1721  *  bad data at the start of the FIFO and possibly in the DMA FIFO.  This
1722  *  function clears the FIFOs and flushes any packets that came in while Rx
1723  *  was being enabled.
1724  **/
1725 void e1000_rx_fifo_flush_82575(struct e1000_hw *hw)
1726 {
1727 	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
1728 	int i, ms_wait;
1729 
1730 	DEBUGFUNC("e1000_rx_fifo_flush_82575");
1731 	if (hw->mac.type != e1000_82575 ||
1732 	    !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
1733 		return;
1734 
1735 	/* Disable all Rx queues */
1736 	for (i = 0; i < 4; i++) {
1737 		rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
1738 		E1000_WRITE_REG(hw, E1000_RXDCTL(i),
1739 		                rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
1740 	}
1741 	/* Poll all queues to verify they have shut down */
1742 	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
1743 		msec_delay(1);
1744 		rx_enabled = 0;
1745 		for (i = 0; i < 4; i++)
1746 			rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i));
1747 		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
1748 			break;
1749 	}
1750 
1751 	if (ms_wait == 10)
1752 		DEBUGOUT("Queue disable timed out after 10ms\n");
1753 
1754 	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
1755 	 * incoming packets are rejected.  Set the enable bit and wait 2ms so
1756 	 * that any packet that was arriving while RCTL.EN was set is flushed
1757 	 */
1758 	rfctl = E1000_READ_REG(hw, E1000_RFCTL);
1759 	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
1760 
1761 	rlpml = E1000_READ_REG(hw, E1000_RLPML);
1762 	E1000_WRITE_REG(hw, E1000_RLPML, 0);
1763 
1764 	rctl = E1000_READ_REG(hw, E1000_RCTL);
1765 	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
1766 	temp_rctl |= E1000_RCTL_LPE;
1767 
1768 	E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
1769 	E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
1770 	E1000_WRITE_FLUSH(hw);
1771 	msec_delay(2);
1772 
1773 	/* Enable Rx queues that were previously enabled and restore our
1774 	 * previous state
1775 	 */
1776 	for (i = 0; i < 4; i++)
1777 		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
1778 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1779 	E1000_WRITE_FLUSH(hw);
1780 
1781 	E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
1782 	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
1783 
1784 	/* Flush receive errors generated by workaround */
1785 	E1000_READ_REG(hw, E1000_ROC);
1786 	E1000_READ_REG(hw, E1000_RNBC);
1787 	E1000_READ_REG(hw, E1000_MPC);
1788 }
1789 
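/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * workaround is meant to run right after the driver turns on receives, so a
 * typical Rx-init sequence would look roughly like:
 *
 *	E1000_WRITE_REG(hw, E1000_RCTL, rctl | E1000_RCTL_EN);
 *	e1000_rx_fifo_flush_82575(hw);
 */
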
1790 /**
1791  *  e1000_set_pcie_completion_timeout - set pci-e completion timeout
1792  *  @hw: pointer to the HW structure
1793  *
1794  *  The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
1795  *  however the hardware default for these parts is 500us to 1ms, which is
1796  *  less than the 10ms recommended by the PCIe spec.  To address this we need
1797  *  to increase the timeout to the 10ms-200ms range for a capability version 1
1798  *  configuration, or to the 16ms-55ms range for version 2.
1799  **/
1800 static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw)
1801 {
1802 	u32 gcr = E1000_READ_REG(hw, E1000_GCR);
1803 	s32 ret_val = E1000_SUCCESS;
1804 	u16 pcie_devctl2;
1805 
1806 	/* only take action if timeout value is defaulted to 0 */
1807 	if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
1808 		goto out;
1809 
1810 	/*
1811 	 * if the capability version is 1 we can set the timeout
1812 	 * range of 10ms to 200ms through the GCR register
1813 	 */
1814 	if (!(gcr & E1000_GCR_CAP_VER2)) {
1815 		gcr |= E1000_GCR_CMPL_TMOUT_10ms;
1816 		goto out;
1817 	}
1818 
1819 	/*
1820 	 * for version 2 capabilities we need to write the config space
1821 	 * directly in order to set the completion timeout value for
1822 	 * 16ms to 55ms
1823 	 */
1824 	ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
1825 	                                  &pcie_devctl2);
1826 	if (ret_val)
1827 		goto out;
1828 
1829 	pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
1830 
1831 	ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
1832 	                                   &pcie_devctl2);
1833 out:
1834 	/* disable completion timeout resend */
1835 	gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
1836 
1837 	E1000_WRITE_REG(hw, E1000_GCR, gcr);
1838 	return ret_val;
1839 }
1840 
1841 /**
1842  *  e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
1843  *  @hw: pointer to the hardware struct
1844  *  @enable: state to enter, either enabled or disabled
1845  *  @pf: Physical Function pool - do not set anti-spoofing for the PF
1846  *
1847  *  Enables/disables L2 switch anti-spoofing functionality.
1848  **/
1849 void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
1850 {
1851 	u32 dtxswc;
1852 
1853 	switch (hw->mac.type) {
1854 	case e1000_82576:
1855 	case e1000_i350:
1856 		dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
1857 		if (enable) {
1858 			dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
1859 				   E1000_DTXSWC_VLAN_SPOOF_MASK);
1860 			/* The PF can spoof - it has to in order to
1861 			 * support emulation mode NICs */
1862 			dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
1863 		} else {
1864 			dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
1865 				    E1000_DTXSWC_VLAN_SPOOF_MASK);
1866 		}
1867 		E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
1868 		break;
1869 	default:
1870 		break;
1871 	}
1872 }
1873 
1874 /**
1875  *  e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback
1876  *  @hw: pointer to the hardware struct
1877  *  @enable: state to enter, either enabled or disabled
1878  *
1879  *  Enables/disables L2 switch loopback functionality.
1880  **/
1881 void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
1882 {
1883 	u32 dtxswc;
1884 
1885 	switch (hw->mac.type) {
1886 	case e1000_82576:
1887 		dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
1888 		if (enable)
1889 			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1890 		else
1891 			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1892 		E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
1893 		break;
1894 	case e1000_i350:
1895 		dtxswc = E1000_READ_REG(hw, E1000_TXSWC);
1896 		if (enable)
1897 			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1898 		else
1899 			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1900 		E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc);
1901 		break;
1902 	default:
1903 		/* Currently no other hardware supports loopback */
1904 		break;
1905 	}
1908 }
1909 
1910 /**
1911  *  e1000_vmdq_set_replication_pf - enable or disable vmdq replication
1912  *  @hw: pointer to the hardware struct
1913  *  @enable: state to enter, either enabled or disabled
1914  *
1915  *  Enables/disables replication of packets across multiple pools.
1916  **/
1917 void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
1918 {
1919 	u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1920 
1921 	if (enable)
1922 		vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
1923 	else
1924 		vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
1925 
1926 	E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1927 }
1928 
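/*
 * Illustrative sketch (hypothetical PF initialization, not part of this
 * file): a VMDq/SR-IOV capable driver would typically combine the three
 * helpers above, here with the PF assumed to be in pool 0:
 *
 *	e1000_vmdq_set_loopback_pf(hw, TRUE);
 *	e1000_vmdq_set_replication_pf(hw, TRUE);
 *	e1000_vmdq_set_anti_spoofing_pf(hw, TRUE, 0);
 */
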
1929 /**
1930  *  e1000_read_phy_reg_82580 - Read 82580 MDI control register
1931  *  @hw: pointer to the HW structure
1932  *  @offset: register offset to be read
1933  *  @data: pointer to the read data
1934  *
1935  *  Reads the MDI control register in the PHY at offset and stores the
1936  *  information read to data.
1937  **/
1938 static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
1939 {
1940 	s32 ret_val;
1941 
1942 	DEBUGFUNC("e1000_read_phy_reg_82580");
1943 
1944 	ret_val = hw->phy.ops.acquire(hw);
1945 	if (ret_val)
1946 		goto out;
1947 
1948 	ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
1949 
1950 	hw->phy.ops.release(hw);
1951 
1952 out:
1953 	return ret_val;
1954 }
1955 
1956 /**
1957  *  e1000_write_phy_reg_82580 - Write 82580 MDI control register
1958  *  @hw: pointer to the HW structure
1959  *  @offset: register offset to write to
1960  *  @data: data to write to register at offset
1961  *
1962  *  Writes data to MDI control register in the PHY at offset.
1963  **/
1964 static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
1965 {
1966 	s32 ret_val;
1967 
1968 	DEBUGFUNC("e1000_write_phy_reg_82580");
1969 
1970 	ret_val = hw->phy.ops.acquire(hw);
1971 	if (ret_val)
1972 		goto out;
1973 
1974 	ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
1975 
1976 	hw->phy.ops.release(hw);
1977 
1978 out:
1979 	return ret_val;
1980 }
1981 
1982 /**
1983  *  e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
1984  *  @hw: pointer to the HW structure
1985  *
1986  *  This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
1987  *  the values found in the EEPROM.  This addresses an issue in which these
1988  *  bits are not restored from EEPROM after reset.
1989  **/
1990 static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw)
1991 {
1992 	s32 ret_val = E1000_SUCCESS;
1993 	u32 mdicnfg;
1994 	u16 nvm_data = 0;
1995 
1996 	DEBUGFUNC("e1000_reset_mdicnfg_82580");
1997 
1998 	if (hw->mac.type != e1000_82580)
1999 		goto out;
2000 	if (!e1000_sgmii_active_82575(hw))
2001 		goto out;
2002 
2003 	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2004 	                           NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2005 	                           &nvm_data);
2006 	if (ret_val) {
2007 		DEBUGOUT("NVM Read Error\n");
2008 		goto out;
2009 	}
2010 
2011 	mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
2012 	if (nvm_data & NVM_WORD24_EXT_MDIO)
2013 		mdicnfg |= E1000_MDICNFG_EXT_MDIO;
2014 	if (nvm_data & NVM_WORD24_COM_MDIO)
2015 		mdicnfg |= E1000_MDICNFG_COM_MDIO;
2016 	E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
2017 out:
2018 	return ret_val;
2019 }
2020 
2021 /**
2022  *  e1000_reset_hw_82580 - Reset hardware
2023  *  @hw: pointer to the HW structure
2024  *
2025  *  This resets the function or the entire device (all ports, etc.)
2026  *  to a known state.
2027  **/
2028 static s32 e1000_reset_hw_82580(struct e1000_hw *hw)
2029 {
2030 	s32 ret_val = E1000_SUCCESS;
2031 	/* BH SW mailbox bit in SW_FW_SYNC */
2032 	u16 swmbsw_mask = E1000_SW_SYNCH_MB;
2033 	u32 ctrl;
2034 	bool global_device_reset = hw->dev_spec._82575.global_device_reset;
2035 
2036 	DEBUGFUNC("e1000_reset_hw_82580");
2037 
2038 	hw->dev_spec._82575.global_device_reset = FALSE;
2039 
2040 	/* Get current control state. */
2041 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
2042 
2043 	/*
2044 	 * Prevent the PCI-E bus from sticking if there is no TLP connection
2045 	 * on the last TLP read/write transaction when MAC is reset.
2046 	 * on the last TLP read/write transaction when the MAC is reset.
2047 	ret_val = e1000_disable_pcie_master_generic(hw);
2048 	if (ret_val)
2049 		DEBUGOUT("PCI-E Master disable polling has failed.\n");
2050 
2051 	DEBUGOUT("Masking off all interrupts\n");
2052 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
2053 	E1000_WRITE_REG(hw, E1000_RCTL, 0);
2054 	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
2055 	E1000_WRITE_FLUSH(hw);
2056 
2057 	msec_delay(10);
2058 
2059 	/* Determine whether or not a global dev reset is requested */
2060 	if (global_device_reset &&
2061 	    e1000_acquire_swfw_sync_82575(hw, swmbsw_mask))
2062 		global_device_reset = FALSE;
2063 
2064 	if (global_device_reset &&
2065 		!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STAT_DEV_RST_SET))
2066 		ctrl |= E1000_CTRL_DEV_RST;
2067 	else
2068 		ctrl |= E1000_CTRL_RST;
2069 
2070 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
2071 
2072 	/* Add delay to ensure DEV_RST has time to complete */
2073 	if (global_device_reset)
2074 		msec_delay(5);
2075 
2076 	ret_val = e1000_get_auto_rd_done_generic(hw);
2077 	if (ret_val) {
2078 		/*
2079 		 * When auto config read does not complete, do not
2080 		 * return with an error. This can happen when there is no
2081 		 * EEPROM, and returning an error here would prevent link.
2082 		 */
2083 		DEBUGOUT("Auto Read Done did not complete\n");
2084 	}
2085 
2086 	/* If EEPROM is not present, run manual init scripts */
2087 	if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
2088 		e1000_reset_init_script_82575(hw);
2089 
2090 	/* clear global device reset status bit */
2091 	E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET);
2092 
2093 	/* Clear any pending interrupt events. */
2094 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
2095 	E1000_READ_REG(hw, E1000_ICR);
2096 
2097 	ret_val = e1000_reset_mdicnfg_82580(hw);
2098 	if (ret_val)
2099 		DEBUGOUT("Could not reset MDICNFG based on EEPROM\n");
2100 
2101 	/* Install any alternate MAC address into RAR0 */
2102 	ret_val = e1000_check_alt_mac_addr_generic(hw);
2103 
2104 	/* Release semaphore */
2105 	if (global_device_reset)
2106 		e1000_release_swfw_sync_82575(hw, swmbsw_mask);
2107 
2108 	return ret_val;
2109 }
2110 
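/*
 * Illustrative sketch (hypothetical caller, not part of this file): a caller
 * can request the stronger global device reset, rather than a per-port
 * reset, by setting the dev_spec flag before issuing the reset:
 *
 *	hw->dev_spec._82575.global_device_reset = TRUE;
 *	ret_val = e1000_reset_hw(hw);
 */
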
2111 /**
2112  *  e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual Rx PBA size
2113  *  @data: data received by reading RXPBS register
2114  *
2115  *  The 82580 uses a table-based approach for packet buffer allocation sizes.
2116  *  This function converts the retrieved value into the correct table value:
2117  *     0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
2118  *  0x0 36  72 144   1   2   4   8  16
2119  *  0x8 35  70 140 rsv rsv rsv rsv rsv
2120  **/
2121 u16 e1000_rxpbs_adjust_82580(u32 data)
2122 {
2123 	u16 ret_val = 0;
2124 
2125 	if (data < E1000_82580_RXPBS_TABLE_SIZE)
2126 		ret_val = e1000_82580_rxpbs_table[data];
2127 
2128 	return ret_val;
2129 }
2130 
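/*
 * Illustrative sketch (not part of the driver; the 0xF mask of the RXPBS
 * size field is an assumption about the register layout): a caller reads
 * RXPBS and maps the size field through the table, e.g.:
 *
 *	u32 rxpbs = E1000_READ_REG(hw, E1000_RXPBS);
 *	u16 rx_pba = e1000_rxpbs_adjust_82580(rxpbs & 0xF);
 */
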
2131 /**
2132  *  e1000_validate_nvm_checksum_with_offset - Validate EEPROM
2133  *  checksum
2134  *  @hw: pointer to the HW structure
2135  *  @offset: offset in words of the checksum protected region
2136  *
2137  *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
2138  *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
2139  **/
2140 s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
2141 {
2142 	s32 ret_val = E1000_SUCCESS;
2143 	u16 checksum = 0;
2144 	u16 i, nvm_data;
2145 
2146 	DEBUGFUNC("e1000_validate_nvm_checksum_with_offset");
2147 
2148 	for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
2149 		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2150 		if (ret_val) {
2151 			DEBUGOUT("NVM Read Error\n");
2152 			goto out;
2153 		}
2154 		checksum += nvm_data;
2155 	}
2156 
2157 	if (checksum != (u16) NVM_SUM) {
2158 		DEBUGOUT("NVM Checksum Invalid\n");
2159 		ret_val = -E1000_ERR_NVM;
2160 		goto out;
2161 	}
2162 
2163 out:
2164 	return ret_val;
2165 }
2166 
2167 /**
2168  *  e1000_update_nvm_checksum_with_offset - Update EEPROM
2169  *  checksum
2170  *  @hw: pointer to the HW structure
2171  *  @offset: offset in words of the checksum protected region
2172  *
2173  *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
2174  *  up to the checksum.  Then calculates the EEPROM checksum and writes the
2175  *  value to the EEPROM.
2176  **/
2177 s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
2178 {
2179 	s32 ret_val;
2180 	u16 checksum = 0;
2181 	u16 i, nvm_data;
2182 
2183 	DEBUGFUNC("e1000_update_nvm_checksum_with_offset");
2184 
2185 	for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
2186 		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2187 		if (ret_val) {
2188 			DEBUGOUT("NVM Read Error while updating checksum.\n");
2189 			goto out;
2190 		}
2191 		checksum += nvm_data;
2192 	}
2193 	checksum = (u16) NVM_SUM - checksum;
2194 	ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
2195 				&checksum);
2196 	if (ret_val)
2197 		DEBUGOUT("NVM Write Error while updating checksum.\n");
2198 
2199 out:
2200 	return ret_val;
2201 }
2202 
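/*
 * Worked example (values purely illustrative): with NVM_SUM == 0xBABA, if
 * the words in [offset, offset + NVM_CHECKSUM_REG) sum to 0xB000, the update
 * routine above writes 0xBABA - 0xB000 = 0x0ABA into the checksum word, so
 * that a later validate pass, which also adds the checksum word itself,
 * totals exactly 0xBABA (modulo 2^16).
 */
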
2203 /**
2204  *  e1000_validate_nvm_checksum_82580 - Validate EEPROM checksum
2205  *  @hw: pointer to the HW structure
2206  *
2207  *  Calculates the EEPROM section checksum by reading/adding each word of
2208  *  the EEPROM and then verifies that the sum of the EEPROM is
2209  *  equal to 0xBABA.
2210  **/
2211 static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw)
2212 {
2213 	s32 ret_val = E1000_SUCCESS;
2214 	u16 eeprom_regions_count = 1;
2215 	u16 j, nvm_data;
2216 	u16 nvm_offset;
2217 
2218 	DEBUGFUNC("e1000_validate_nvm_checksum_82580");
2219 
2220 	ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2221 	if (ret_val) {
2222 		DEBUGOUT("NVM Read Error\n");
2223 		goto out;
2224 	}
2225 
2226 	if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
2227 		/* if the checksums compatibility bit is set, validate
2228 		 * checksums for all 4 ports. */
2229 		eeprom_regions_count = 4;
2230 	}
2231 
2232 	for (j = 0; j < eeprom_regions_count; j++) {
2233 		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2234 		ret_val = e1000_validate_nvm_checksum_with_offset(hw,
2235 								nvm_offset);
2236 		if (ret_val != E1000_SUCCESS)
2237 			goto out;
2238 	}
2239 
2240 out:
2241 	return ret_val;
2242 }
2243 
2244 /**
2245  *  e1000_update_nvm_checksum_82580 - Update EEPROM checksum
2246  *  @hw: pointer to the HW structure
2247  *
2248  *  Updates the EEPROM section checksums for all 4 ports by reading/adding
2249  *  each word of the EEPROM up to the checksum.  Then calculates the EEPROM
2250  *  checksum and writes the value to the EEPROM.
2251  **/
2252 static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw)
2253 {
2254 	s32 ret_val;
2255 	u16 j, nvm_data;
2256 	u16 nvm_offset;
2257 
2258 	DEBUGFUNC("e1000_update_nvm_checksum_82580");
2259 
2260 	ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2261 	if (ret_val) {
2262 		DEBUGOUT("NVM Read Error while updating checksum"
2263 			" compatibility bit.\n");
2264 		goto out;
2265 	}
2266 
2267 	if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
2268 		/* set compatibility bit to validate checksums appropriately */
2269 		nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
2270 		ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
2271 					&nvm_data);
2272 		if (ret_val) {
2273 			DEBUGOUT("NVM Write Error while updating checksum"
2274 				" compatibility bit.\n");
2275 			goto out;
2276 		}
2277 	}
2278 
2279 	for (j = 0; j < 4; j++) {
2280 		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2281 		ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
2282 		if (ret_val)
2283 			goto out;
2285 	}
2286 
2287 out:
2288 	return ret_val;
2289 }
2290 
2291 /**
2292  *  e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum
2293  *  @hw: pointer to the HW structure
2294  *
2295  *  Calculates the EEPROM section checksum by reading/adding each word of
2296  *  the EEPROM and then verifies that the sum of the EEPROM is
2297  *  equal to 0xBABA.
2298  **/
2299 static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw)
2300 {
2301 	s32 ret_val = E1000_SUCCESS;
2302 	u16 j;
2303 	u16 nvm_offset;
2304 
2305 	DEBUGFUNC("e1000_validate_nvm_checksum_i350");
2306 
2307 	for (j = 0; j < 4; j++) {
2308 		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2309 		ret_val = e1000_validate_nvm_checksum_with_offset(hw,
2310 								nvm_offset);
2311 		if (ret_val != E1000_SUCCESS)
2312 			goto out;
2313 	}
2314 
2315 out:
2316 	return ret_val;
2317 }
2318 
2319 /**
2320  *  e1000_update_nvm_checksum_i350 - Update EEPROM checksum
2321  *  @hw: pointer to the HW structure
2322  *
2323  *  Updates the EEPROM section checksums for all 4 ports by reading/adding
2324  *  each word of the EEPROM up to the checksum.  Then calculates the EEPROM
2325  *  checksum and writes the value to the EEPROM.
2326  **/
2327 static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw)
2328 {
2329 	s32 ret_val = E1000_SUCCESS;
2330 	u16 j;
2331 	u16 nvm_offset;
2332 
2333 	DEBUGFUNC("e1000_update_nvm_checksum_i350");
2334 
2335 	for (j = 0; j < 4; j++) {
2336 		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2337 		ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
2338 		if (ret_val != E1000_SUCCESS)
2339 			goto out;
2340 	}
2341 
2342 out:
2343 	return ret_val;
2344 }
2345 
2346 /**
2347  *  e1000_set_eee_i350 - Enable/disable EEE support
2348  *  @hw: pointer to the HW structure
2349  *
2350  *  Enable/disable EEE based on the setting in the dev_spec structure.
2352  **/
2353 s32 e1000_set_eee_i350(struct e1000_hw *hw)
2354 {
2355 	s32 ret_val = E1000_SUCCESS;
2356 	u32 ipcnfg, eeer, ctrl_ext;
2357 
2358 	DEBUGFUNC("e1000_set_eee_i350");
2359 
2360 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2361 	if ((hw->mac.type != e1000_i350) ||
2362 	    (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK))
2363 		goto out;
2364 	ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG);
2365 	eeer = E1000_READ_REG(hw, E1000_EEER);
2366 
2367 	/* enable or disable per user setting */
2368 	if (!(hw->dev_spec._82575.eee_disable)) {
2369 		ipcnfg |= (E1000_IPCNFG_EEE_1G_AN |
2370 		           E1000_IPCNFG_EEE_100M_AN);
2371 		eeer |= (E1000_EEER_TX_LPI_EN |
2372 		         E1000_EEER_RX_LPI_EN |
2373 		         E1000_EEER_LPI_FC);
2374 
2375 	} else {
2376 		ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
2377 		            E1000_IPCNFG_EEE_100M_AN);
2378 		eeer &= ~(E1000_EEER_TX_LPI_EN |
2379 		          E1000_EEER_RX_LPI_EN |
2380 		          E1000_EEER_LPI_FC);
2381 	}
2382 	E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg);
2383 	E1000_WRITE_REG(hw, E1000_EEER, eeer);
2384 	E1000_READ_REG(hw, E1000_IPCNFG);
2385 	E1000_READ_REG(hw, E1000_EEER);
2386 out:
2388 	return ret_val;
2389 }
2390
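/*
 * Illustrative sketch (hypothetical caller, not part of this file): EEE is
 * controlled through the dev_spec flag consumed above, e.g. to turn it off
 * before (re)configuring the link:
 *
 *	hw->dev_spec._82575.eee_disable = TRUE;
 *	ret_val = e1000_set_eee_i350(hw);
 */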