xref: /dragonfly/sys/dev/netif/ig_hal/e1000_82575.c (revision 8accc937)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2011, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 /*
36  * 82575EB Gigabit Network Connection
37  * 82575EB Gigabit Backplane Connection
38  * 82575GB Gigabit Network Connection
39  * 82576 Gigabit Network Connection
40  * 82576 Quad Port Gigabit Mezzanine Adapter
41  */
42 
43 #include "e1000_api.h"
44 
45 static s32  e1000_init_phy_params_82575(struct e1000_hw *hw);
46 static s32  e1000_init_mac_params_82575(struct e1000_hw *hw);
47 static s32  e1000_acquire_phy_82575(struct e1000_hw *hw);
48 static void e1000_release_phy_82575(struct e1000_hw *hw);
49 static s32  e1000_acquire_nvm_82575(struct e1000_hw *hw);
50 static void e1000_release_nvm_82575(struct e1000_hw *hw);
51 static s32  e1000_check_for_link_82575(struct e1000_hw *hw);
52 static s32  e1000_get_cfg_done_82575(struct e1000_hw *hw);
53 static s32  e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
54                                          u16 *duplex);
55 static s32  e1000_init_hw_82575(struct e1000_hw *hw);
56 static s32  e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw);
57 static s32  e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
58                                            u16 *data);
59 static s32  e1000_reset_hw_82575(struct e1000_hw *hw);
60 static s32  e1000_reset_hw_82580(struct e1000_hw *hw);
61 static s32  e1000_read_phy_reg_82580(struct e1000_hw *hw,
62                                     u32 offset, u16 *data);
63 static s32  e1000_write_phy_reg_82580(struct e1000_hw *hw,
64                                      u32 offset, u16 data);
65 static s32  e1000_set_d0_lplu_state_82580(struct e1000_hw *hw,
66                                           bool active);
67 static s32  e1000_set_d3_lplu_state_82580(struct e1000_hw *hw,
68                                           bool active);
69 static s32  e1000_set_d0_lplu_state_82575(struct e1000_hw *hw,
70                                           bool active);
71 static s32  e1000_setup_copper_link_82575(struct e1000_hw *hw);
72 static s32  e1000_setup_serdes_link_82575(struct e1000_hw *hw);
73 static s32  e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data);
74 static s32  e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw,
75                                             u32 offset, u16 data);
76 static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw);
77 static s32  e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
78 static s32  e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
79                                                  u16 *speed, u16 *duplex);
80 static s32  e1000_get_phy_id_82575(struct e1000_hw *hw);
81 static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
82 static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
83 static s32  e1000_reset_init_script_82575(struct e1000_hw *hw);
84 static s32  e1000_read_mac_addr_82575(struct e1000_hw *hw);
85 static void e1000_config_collision_dist_82575(struct e1000_hw *hw);
86 static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
87 static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw);
88 static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw);
89 static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
90 static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw);
91 static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw);
92 static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw);
93 static s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw,
94 						u16 offset);
95 static s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
96 						u16 offset);
97 static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw);
98 static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw);
99 
/*
 * Lookup table used by e1000_rxpbs_adjust_82580(); indexed by the RXPBS
 * field value read from hardware.  NOTE(review): entries presumably encode
 * packet buffer allocation sizes per the 82580 datasheet -- confirm there.
 */
static const u16 e1000_82580_rxpbs_table[] =
	{ 36, 72, 144, 1, 2, 4, 8, 16,
	  35, 70, 140 };
/* Element count of the table above (array, not pointer, so sizeof works). */
#define E1000_82580_RXPBS_TABLE_SIZE \
	(sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
105 
106 
107 /**
108  *  e1000_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
109  *  @hw: pointer to the HW structure
110  *
111  *  Called to determine if the I2C pins are being used for I2C or as an
112  *  external MDIO interface since the two options are mutually exclusive.
113  **/
114 static bool e1000_sgmii_uses_mdio_82575(struct e1000_hw *hw)
115 {
116 	u32 reg = 0;
117 	bool ext_mdio = FALSE;
118 
119 	DEBUGFUNC("e1000_sgmii_uses_mdio_82575");
120 
121 	switch (hw->mac.type) {
122 	case e1000_82575:
123 	case e1000_82576:
124 		reg = E1000_READ_REG(hw, E1000_MDIC);
125 		ext_mdio = !!(reg & E1000_MDIC_DEST);
126 		break;
127 	case e1000_82580:
128 	case e1000_i350:
129 		reg = E1000_READ_REG(hw, E1000_MDICNFG);
130 		ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
131 		break;
132 	default:
133 		break;
134 	}
135 	return ext_mdio;
136 }
137 
/**
 *  e1000_init_phy_params_82575 - Init PHY func ptrs.
 *  @hw: pointer to the HW structure
 *
 *  Sets up the PHY function pointers and parameters according to the
 *  media type and the PHY ID read from the device.  Returns
 *  -E1000_ERR_PHY for an unrecognized PHY ID.
 **/
static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = E1000_SUCCESS;
	u32 ctrl_ext;

	DEBUGFUNC("e1000_init_phy_params_82575");

	/* Non-copper media has no manageable PHY; nothing else to set up. */
	if (hw->phy.media_type != e1000_media_type_copper) {
		phy->type = e1000_phy_none;
		goto out;
	}

	phy->ops.power_up   = e1000_power_up_phy_copper;
	phy->ops.power_down = e1000_power_down_phy_copper_82575;

	phy->autoneg_mask           = AUTONEG_ADVERTISE_SPEED_DEFAULT;
	phy->reset_delay_us         = 100;

	phy->ops.acquire            = e1000_acquire_phy_82575;
	phy->ops.check_reset_block  = e1000_check_reset_block_generic;
	phy->ops.commit             = e1000_phy_sw_reset_generic;
	phy->ops.get_cfg_done       = e1000_get_cfg_done_82575;
	phy->ops.release            = e1000_release_phy_82575;

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);

	/* Enable the I2C pins only when the PHY sits on the SGMII/I2C bus. */
	if (e1000_sgmii_active_82575(hw)) {
		phy->ops.reset      = e1000_phy_hw_reset_sgmii_82575;
		ctrl_ext |= E1000_CTRL_I2C_ENA;
	} else {
		phy->ops.reset      = e1000_phy_hw_reset_generic;
		ctrl_ext &= ~E1000_CTRL_I2C_ENA;
	}

	/* Commit the I2C pin configuration before touching MDICNFG. */
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	e1000_reset_mdicnfg_82580(hw);

	/*
	 * Select register accessors: I2C-attached SGMII PHYs use the
	 * I2C path; otherwise 82580+ use their dedicated MDIC flow and
	 * older parts use the generic IGP accessors.
	 */
	if (e1000_sgmii_active_82575(hw) && !e1000_sgmii_uses_mdio_82575(hw)) {
		phy->ops.read_reg   = e1000_read_phy_reg_sgmii_82575;
		phy->ops.write_reg  = e1000_write_phy_reg_sgmii_82575;
	} else if (hw->mac.type >= e1000_82580) {
		phy->ops.read_reg   = e1000_read_phy_reg_82580;
		phy->ops.write_reg  = e1000_write_phy_reg_82580;
	} else {
		phy->ops.read_reg   = e1000_read_phy_reg_igp;
		phy->ops.write_reg  = e1000_write_phy_reg_igp;
	}

	/* Set phy->phy_addr and phy->id. */
	ret_val = e1000_get_phy_id_82575(hw);

	/* Verify phy id and set remaining function pointers */
	switch (phy->id) {
	case I347AT4_E_PHY_ID:
	case M88E1112_E_PHY_ID:
	case M88E1340M_E_PHY_ID:
	case M88E1111_I_PHY_ID:
		phy->type                   = e1000_phy_m88;
		phy->ops.check_polarity     = e1000_check_polarity_m88;
		phy->ops.get_info           = e1000_get_phy_info_m88;
		/* Newer M88 variants use the gen2 cable-length method. */
		if (phy->id == I347AT4_E_PHY_ID ||
		    phy->id == M88E1112_E_PHY_ID ||
		    phy->id == M88E1340M_E_PHY_ID)
			phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2;
		else
			phy->ops.get_cable_length = e1000_get_cable_length_m88;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
		break;
	case IGP03E1000_E_PHY_ID:
	case IGP04E1000_E_PHY_ID:
		phy->type                   = e1000_phy_igp_3;
		phy->ops.check_polarity     = e1000_check_polarity_igp;
		phy->ops.get_info           = e1000_get_phy_info_igp;
		phy->ops.get_cable_length   = e1000_get_cable_length_igp_2;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
		phy->ops.set_d0_lplu_state  = e1000_set_d0_lplu_state_82575;
		phy->ops.set_d3_lplu_state  = e1000_set_d3_lplu_state_generic;
		break;
	case I82580_I_PHY_ID:
	case I350_I_PHY_ID:
		phy->type                   = e1000_phy_82580;
		phy->ops.check_polarity     = e1000_check_polarity_82577;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_82577;
		phy->ops.get_cable_length   = e1000_get_cable_length_82577;
		phy->ops.get_info           = e1000_get_phy_info_82577;
		phy->ops.set_d0_lplu_state  = e1000_set_d0_lplu_state_82580;
		phy->ops.set_d3_lplu_state  = e1000_set_d3_lplu_state_82580;
		break;
	default:
		/* Unknown PHY ID: refuse rather than guess at semantics. */
		ret_val = -E1000_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}
239 
/**
 *  e1000_init_nvm_params_82575 - Init NVM func ptrs.
 *  @hw: pointer to the HW structure
 *
 *  Derives the EEPROM geometry from the EECD register and installs the
 *  NVM function pointers, with per-MAC overrides for 82580 and i350
 *  checksum handling.  Always returns E1000_SUCCESS.
 **/
s32 e1000_init_nvm_params_82575(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
	u16 size;

	DEBUGFUNC("e1000_init_nvm_params_82575");

	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
	             E1000_EECD_SIZE_EX_SHIFT);
	/*
	 * Added to a constant, "size" becomes the left-shift value
	 * for setting word_size.
	 */
	size += NVM_WORD_SIZE_BASE_SHIFT;

	nvm->word_size = 1 << size;
	nvm->opcode_bits        = 8;
	nvm->delay_usec         = 1;
	switch (nvm->override) {
	case e1000_nvm_override_spi_large:
		nvm->page_size    = 32;
		nvm->address_bits = 16;
		break;
	case e1000_nvm_override_spi_small:
		nvm->page_size    = 8;
		nvm->address_bits = 8;
		break;
	default:
		/* No override: infer geometry from the EECD address-bits flag. */
		nvm->page_size    = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
		break;
	}

	nvm->type = e1000_nvm_eeprom_spi;

	/* 32K-word parts use a 128-byte page regardless of the above. */
	if (nvm->word_size == (1 << 15))
		nvm->page_size = 128;

	/* Function Pointers */
	nvm->ops.acquire    = e1000_acquire_nvm_82575;
	nvm->ops.release    = e1000_release_nvm_82575;
	/* EERD register reads only address parts smaller than 32K words. */
	if (nvm->word_size < (1 << 15))
		nvm->ops.read    = e1000_read_nvm_eerd;
	else
		nvm->ops.read    = e1000_read_nvm_spi;

	nvm->ops.write              = e1000_write_nvm_spi;
	nvm->ops.validate           = e1000_validate_nvm_checksum_generic;
	nvm->ops.update             = e1000_update_nvm_checksum_generic;
	nvm->ops.valid_led_default  = e1000_valid_led_default_82575;

	/* override generic family function pointers for specific descendants */
	switch (hw->mac.type) {
	case e1000_82580:
		nvm->ops.validate = e1000_validate_nvm_checksum_82580;
		nvm->ops.update = e1000_update_nvm_checksum_82580;
		break;
	case e1000_i350:
		nvm->ops.validate = e1000_validate_nvm_checksum_i350;
		nvm->ops.update = e1000_update_nvm_checksum_i350;
		break;
	default:
		break;
	}

	return E1000_SUCCESS;
}
312 
313 /**
314  *  e1000_init_mac_params_82575 - Init MAC func ptrs.
315  *  @hw: pointer to the HW structure
316  **/
317 static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
318 {
319 	struct e1000_mac_info *mac = &hw->mac;
320 	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
321 	u32 ctrl_ext = 0;
322 
323 	DEBUGFUNC("e1000_init_mac_params_82575");
324 
325 	/* Set media type */
326         /*
327 	 * The 82575 uses bits 22:23 for link mode. The mode can be changed
328          * based on the EEPROM. We cannot rely upon device ID. There
329          * is no distinguishable difference between fiber and internal
330          * SerDes mode on the 82575. There can be an external PHY attached
331          * on the SGMII interface. For this, we'll set sgmii_active to TRUE.
332          */
333 	hw->phy.media_type = e1000_media_type_copper;
334 	dev_spec->sgmii_active = FALSE;
335 
336 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
337 	switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
338 	case E1000_CTRL_EXT_LINK_MODE_SGMII:
339 		dev_spec->sgmii_active = TRUE;
340 		break;
341 	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
342 	case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
343 		hw->phy.media_type = e1000_media_type_internal_serdes;
344 		break;
345 	default:
346 		break;
347 	}
348 
349 	/* Set mta register count */
350 	mac->mta_reg_count = 128;
351 	/* Set uta register count */
352 	mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128;
353 	/* Set rar entry count */
354 	mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
355 	if (mac->type == e1000_82576)
356 		mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
357 	if (mac->type == e1000_82580)
358 		mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
359 	if (mac->type == e1000_i350) {
360 		mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
361 		/* Enable EEE default settings for i350 */
362 		dev_spec->eee_disable = FALSE;
363 	}
364 
365 	/* Set if part includes ASF firmware */
366 	mac->asf_firmware_present = TRUE;
367 	/* FWSM register */
368 	mac->has_fwsm = TRUE;
369 	/* ARC supported; valid only if manageability features are enabled. */
370 	mac->arc_subsystem_valid =
371 	        (E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK)
372 	                ? TRUE : FALSE;
373 
374 	/* Function pointers */
375 
376 	/* bus type/speed/width */
377 	mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
378 	/* reset */
379 	if (mac->type >= e1000_82580)
380 		mac->ops.reset_hw = e1000_reset_hw_82580;
381 	else
382 	mac->ops.reset_hw = e1000_reset_hw_82575;
383 	/* hw initialization */
384 	mac->ops.init_hw = e1000_init_hw_82575;
385 	/* link setup */
386 	mac->ops.setup_link = e1000_setup_link_generic;
387 	/* physical interface link setup */
388 	mac->ops.setup_physical_interface =
389 	        (hw->phy.media_type == e1000_media_type_copper)
390 	                ? e1000_setup_copper_link_82575
391 	                : e1000_setup_serdes_link_82575;
392 	/* physical interface shutdown */
393 	mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575;
394 	/* physical interface power up */
395 	mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575;
396 	/* check for link */
397 	mac->ops.check_for_link = e1000_check_for_link_82575;
398 	/* receive address register setting */
399 	mac->ops.rar_set = e1000_rar_set_generic;
400 	/* read mac address */
401 	mac->ops.read_mac_addr = e1000_read_mac_addr_82575;
402 	/* configure collision distance */
403 	mac->ops.config_collision_dist = e1000_config_collision_dist_82575;
404 	/* multicast address update */
405 	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
406 	/* writing VFTA */
407 	mac->ops.write_vfta = e1000_write_vfta_generic;
408 	/* clearing VFTA */
409 	mac->ops.clear_vfta = e1000_clear_vfta_generic;
410 	/* ID LED init */
411 	mac->ops.id_led_init = e1000_id_led_init_generic;
412 	/* blink LED */
413 	mac->ops.blink_led = e1000_blink_led_generic;
414 	/* setup LED */
415 	mac->ops.setup_led = e1000_setup_led_generic;
416 	/* cleanup LED */
417 	mac->ops.cleanup_led = e1000_cleanup_led_generic;
418 	/* turn on/off LED */
419 	mac->ops.led_on = e1000_led_on_generic;
420 	mac->ops.led_off = e1000_led_off_generic;
421 	/* clear hardware counters */
422 	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575;
423 	/* link info */
424 	mac->ops.get_link_up_info = e1000_get_link_up_info_82575;
425 
426 	/* set lan id for port to determine which phy lock to use */
427 	hw->mac.ops.set_lan_id(hw);
428 
429 	return E1000_SUCCESS;
430 }
431 
432 /**
433  *  e1000_init_function_pointers_82575 - Init func ptrs.
434  *  @hw: pointer to the HW structure
435  *
436  *  Called to initialize all function pointers and parameters.
437  **/
438 void e1000_init_function_pointers_82575(struct e1000_hw *hw)
439 {
440 	DEBUGFUNC("e1000_init_function_pointers_82575");
441 
442 	hw->mac.ops.init_params = e1000_init_mac_params_82575;
443 	hw->nvm.ops.init_params = e1000_init_nvm_params_82575;
444 	hw->phy.ops.init_params = e1000_init_phy_params_82575;
445 	hw->mbx.ops.init_params = e1000_init_mbx_params_pf;
446 }
447 
448 /**
449  *  e1000_acquire_phy_82575 - Acquire rights to access PHY
450  *  @hw: pointer to the HW structure
451  *
452  *  Acquire access rights to the correct PHY.
453  **/
454 static s32 e1000_acquire_phy_82575(struct e1000_hw *hw)
455 {
456 	u16 mask = E1000_SWFW_PHY0_SM;
457 
458 	DEBUGFUNC("e1000_acquire_phy_82575");
459 
460 	if (hw->bus.func == E1000_FUNC_1)
461 		mask = E1000_SWFW_PHY1_SM;
462 	else if (hw->bus.func == E1000_FUNC_2)
463 		mask = E1000_SWFW_PHY2_SM;
464 	else if (hw->bus.func == E1000_FUNC_3)
465 		mask = E1000_SWFW_PHY3_SM;
466 
467 	return e1000_acquire_swfw_sync_82575(hw, mask);
468 }
469 
470 /**
471  *  e1000_release_phy_82575 - Release rights to access PHY
472  *  @hw: pointer to the HW structure
473  *
474  *  A wrapper to release access rights to the correct PHY.
475  **/
476 static void e1000_release_phy_82575(struct e1000_hw *hw)
477 {
478 	u16 mask = E1000_SWFW_PHY0_SM;
479 
480 	DEBUGFUNC("e1000_release_phy_82575");
481 
482 	if (hw->bus.func == E1000_FUNC_1)
483 		mask = E1000_SWFW_PHY1_SM;
484 	else if (hw->bus.func == E1000_FUNC_2)
485 		mask = E1000_SWFW_PHY2_SM;
486 	else if (hw->bus.func == E1000_FUNC_3)
487 		mask = E1000_SWFW_PHY3_SM;
488 
489 	e1000_release_swfw_sync_82575(hw, mask);
490 }
491 
492 /**
493  *  e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
494  *  @hw: pointer to the HW structure
495  *  @offset: register offset to be read
496  *  @data: pointer to the read data
497  *
498  *  Reads the PHY register at offset using the serial gigabit media independent
499  *  interface and stores the retrieved information in data.
500  **/
501 static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
502                                           u16 *data)
503 {
504 	s32 ret_val = -E1000_ERR_PARAM;
505 
506 	DEBUGFUNC("e1000_read_phy_reg_sgmii_82575");
507 
508 	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
509 		DEBUGOUT1("PHY Address %u is out of range\n", offset);
510 		goto out;
511 	}
512 
513 	ret_val = hw->phy.ops.acquire(hw);
514 	if (ret_val)
515 		goto out;
516 
517 	ret_val = e1000_read_phy_reg_i2c(hw, offset, data);
518 
519 	hw->phy.ops.release(hw);
520 
521 out:
522 	return ret_val;
523 }
524 
525 /**
526  *  e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
527  *  @hw: pointer to the HW structure
528  *  @offset: register offset to write to
529  *  @data: data to write at register offset
530  *
531  *  Writes the data to PHY register at the offset using the serial gigabit
532  *  media independent interface.
533  **/
534 static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
535                                            u16 data)
536 {
537 	s32 ret_val = -E1000_ERR_PARAM;
538 
539 	DEBUGFUNC("e1000_write_phy_reg_sgmii_82575");
540 
541 	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
542 		DEBUGOUT1("PHY Address %d is out of range\n", offset);
543 		goto out;
544 	}
545 
546 	ret_val = hw->phy.ops.acquire(hw);
547 	if (ret_val)
548 		goto out;
549 
550 	ret_val = e1000_write_phy_reg_i2c(hw, offset, data);
551 
552 	hw->phy.ops.release(hw);
553 
554 out:
555 	return ret_val;
556 }
557 
/**
 *  e1000_get_phy_id_82575 - Retrieve PHY addr and id
 *  @hw: pointer to the HW structure
 *
 *  Retrieves the PHY address and ID for both PHYs which do and do not use
 *  the SGMII interface.  On success phy->addr and phy->id are valid;
 *  returns -E1000_ERR_PHY if no responding PHY is found.
 **/
static s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32  ret_val = E1000_SUCCESS;
	u16 phy_id;
	u32 ctrl_ext;
	u32 mdic;

	DEBUGFUNC("e1000_get_phy_id_82575");

	/*
	 * For SGMII PHYs, we try the list of possible addresses until
	 * we find one that works.  For non-SGMII PHYs
	 * (e.g. integrated copper PHYs), an address of 1 should
	 * work.  The result of this function should mean phy->phy_addr
	 * and phy->id are set correctly.
	 */
	if (!e1000_sgmii_active_82575(hw)) {
		phy->addr = 1;
		ret_val = e1000_get_phy_id(hw);
		goto out;
	}

	/* External MDIO: the PHY address was programmed into MDIC/MDICNFG. */
	if (e1000_sgmii_uses_mdio_82575(hw)) {
		switch (hw->mac.type) {
		case e1000_82575:
		case e1000_82576:
			mdic = E1000_READ_REG(hw, E1000_MDIC);
			mdic &= E1000_MDIC_PHY_MASK;
			phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
			break;
		case e1000_82580:
		case e1000_i350:
			/* 82580/i350 keep the PHY address in MDICNFG instead. */
			mdic = E1000_READ_REG(hw, E1000_MDICNFG);
			mdic &= E1000_MDICNFG_PHY_MASK;
			phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
			break;
		default:
			ret_val = -E1000_ERR_PHY;
			goto out;
			break;	/* NOTE(review): unreachable after goto */
		}
		ret_val = e1000_get_phy_id(hw);
		goto out;
	}

	/* Power on sgmii phy if it is disabled */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(hw, E1000_CTRL_EXT,
	                ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
	E1000_WRITE_FLUSH(hw);
	/* Give the PHY time to power up before probing addresses. */
	msec_delay(300);

	/*
	 * The address field in the I2CCMD register is 3 bits and 0 is invalid.
	 * Therefore, we need to test 1-7
	 */
	for (phy->addr = 1; phy->addr < 8; phy->addr++) {
		ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
		if (ret_val == E1000_SUCCESS) {
			DEBUGOUT2("Vendor ID 0x%08X read at address %u\n",
			          phy_id,
			          phy->addr);
			/*
			 * At the time of this writing, The M88 part is
			 * the only supported SGMII PHY product.
			 */
			if (phy_id == M88_VENDOR)
				break;
		} else {
			DEBUGOUT1("PHY address %u was unreadable\n",
			          phy->addr);
		}
	}

	/* A valid PHY type couldn't be found. */
	if (phy->addr == 8) {
		phy->addr = 0;
		ret_val = -E1000_ERR_PHY;
	} else {
		ret_val = e1000_get_phy_id(hw);
	}

	/* restore previous sfp cage power state */
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);

out:
	return ret_val;
}
654 
655 /**
656  *  e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset
657  *  @hw: pointer to the HW structure
658  *
659  *  Resets the PHY using the serial gigabit media independent interface.
660  **/
661 static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
662 {
663 	s32 ret_val = E1000_SUCCESS;
664 
665 	DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575");
666 
667 	/*
668 	 * This isn't a TRUE "hard" reset, but is the only reset
669 	 * available to us at this time.
670 	 */
671 
672 	DEBUGOUT("Soft resetting SGMII attached PHY...\n");
673 
674 	if (!(hw->phy.ops.write_reg))
675 		goto out;
676 
677 	/*
678 	 * SFP documentation requires the following to configure the SPF module
679 	 * to work on SGMII.  No further documentation is given.
680 	 */
681 	ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
682 	if (ret_val)
683 		goto out;
684 
685 	ret_val = hw->phy.ops.commit(hw);
686 
687 out:
688 	return ret_val;
689 }
690 
691 /**
692  *  e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
693  *  @hw: pointer to the HW structure
694  *  @active: TRUE to enable LPLU, FALSE to disable
695  *
696  *  Sets the LPLU D0 state according to the active flag.  When
697  *  activating LPLU this function also disables smart speed
698  *  and vice versa.  LPLU will not be activated unless the
699  *  device autonegotiation advertisement meets standards of
700  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
701  *  This is a function pointer entry point only called by
702  *  PHY setup routines.
703  **/
704 static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
705 {
706 	struct e1000_phy_info *phy = &hw->phy;
707 	s32 ret_val = E1000_SUCCESS;
708 	u16 data;
709 
710 	DEBUGFUNC("e1000_set_d0_lplu_state_82575");
711 
712 	if (!(hw->phy.ops.read_reg))
713 		goto out;
714 
715 	ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
716 	if (ret_val)
717 		goto out;
718 
719 	if (active) {
720 		data |= IGP02E1000_PM_D0_LPLU;
721 		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
722 		                             data);
723 		if (ret_val)
724 			goto out;
725 
726 		/* When LPLU is enabled, we should disable SmartSpeed */
727 		ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
728 		                            &data);
729 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
730 		ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
731 		                             data);
732 		if (ret_val)
733 			goto out;
734 	} else {
735 		data &= ~IGP02E1000_PM_D0_LPLU;
736 		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
737 		                             data);
738 		/*
739 		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
740 		 * during Dx states where the power conservation is most
741 		 * important.  During driver activity we should enable
742 		 * SmartSpeed, so performance is maintained.
743 		 */
744 		if (phy->smart_speed == e1000_smart_speed_on) {
745 			ret_val = phy->ops.read_reg(hw,
746 			                            IGP01E1000_PHY_PORT_CONFIG,
747 			                            &data);
748 			if (ret_val)
749 				goto out;
750 
751 			data |= IGP01E1000_PSCFR_SMART_SPEED;
752 			ret_val = phy->ops.write_reg(hw,
753 			                             IGP01E1000_PHY_PORT_CONFIG,
754 			                             data);
755 			if (ret_val)
756 				goto out;
757 		} else if (phy->smart_speed == e1000_smart_speed_off) {
758 			ret_val = phy->ops.read_reg(hw,
759 			                            IGP01E1000_PHY_PORT_CONFIG,
760 			                            &data);
761 			if (ret_val)
762 				goto out;
763 
764 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
765 			ret_val = phy->ops.write_reg(hw,
766 			                             IGP01E1000_PHY_PORT_CONFIG,
767 			                             data);
768 			if (ret_val)
769 				goto out;
770 		}
771 	}
772 
773 out:
774 	return ret_val;
775 }
776 
777 /**
778  *  e1000_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
779  *  @hw: pointer to the HW structure
780  *  @active: TRUE to enable LPLU, FALSE to disable
781  *
782  *  Sets the LPLU D0 state according to the active flag.  When
783  *  activating LPLU this function also disables smart speed
784  *  and vice versa.  LPLU will not be activated unless the
785  *  device autonegotiation advertisement meets standards of
786  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
787  *  This is a function pointer entry point only called by
788  *  PHY setup routines.
789  **/
790 static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
791 {
792 	struct e1000_phy_info *phy = &hw->phy;
793 	s32 ret_val = E1000_SUCCESS;
794 	u16 data;
795 
796 	DEBUGFUNC("e1000_set_d0_lplu_state_82580");
797 
798 	data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
799 
800 	if (active) {
801 		data |= E1000_82580_PM_D0_LPLU;
802 
803 		/* When LPLU is enabled, we should disable SmartSpeed */
804 		data &= ~E1000_82580_PM_SPD;
805 	} else {
806 		data &= ~E1000_82580_PM_D0_LPLU;
807 
808 		/*
809 		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
810 		 * during Dx states where the power conservation is most
811 		 * important.  During driver activity we should enable
812 		 * SmartSpeed, so performance is maintained.
813 		 */
814 		if (phy->smart_speed == e1000_smart_speed_on) {
815 			data |= E1000_82580_PM_SPD;
816 		} else if (phy->smart_speed == e1000_smart_speed_off) {
817 			data &= ~E1000_82580_PM_SPD;
818 		}
819 	}
820 
821 	E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
822 	return ret_val;
823 }
824 
825 /**
826  *  e1000_set_d3_lplu_state_82580 - Sets low power link up state for D3
827  *  @hw: pointer to the HW structure
828  *  @active: boolean used to enable/disable lplu
829  *
830  *  Success returns 0, Failure returns 1
831  *
832  *  The low power link up (lplu) state is set to the power management level D3
833  *  and SmartSpeed is disabled when active is TRUE, else clear lplu for D3
834  *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
835  *  is used during Dx states where the power conservation is most important.
836  *  During driver activity, SmartSpeed should be enabled so performance is
837  *  maintained.
838  **/
839 s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
840 {
841 	struct e1000_phy_info *phy = &hw->phy;
842 	s32 ret_val = E1000_SUCCESS;
843 	u16 data;
844 
845 	DEBUGFUNC("e1000_set_d3_lplu_state_82580");
846 
847 	data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
848 
849 	if (!active) {
850 		data &= ~E1000_82580_PM_D3_LPLU;
851 		/*
852 		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
853 		 * during Dx states where the power conservation is most
854 		 * important.  During driver activity we should enable
855 		 * SmartSpeed, so performance is maintained.
856 		 */
857 		if (phy->smart_speed == e1000_smart_speed_on) {
858 			data |= E1000_82580_PM_SPD;
859 		} else if (phy->smart_speed == e1000_smart_speed_off) {
860 			data &= ~E1000_82580_PM_SPD;
861 		}
862 	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
863 	           (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
864 	           (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
865 		data |= E1000_82580_PM_D3_LPLU;
866 		/* When LPLU is enabled, we should disable SmartSpeed */
867 		data &= ~E1000_82580_PM_SPD;
868 	}
869 
870 	E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
871 	return ret_val;
872 }
873 
/**
 *  e1000_acquire_nvm_82575 - Request for access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Acquire the necessary semaphores for exclusive access to the EEPROM.
 *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
 *  Return successful if access grant bit set, else clear the request for
 *  EEPROM access and return -E1000_ERR_NVM (-1).
 **/
static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("e1000_acquire_nvm_82575");

	/* Take the SW/FW EEPROM semaphore before touching EECD. */
	ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
	if (ret_val)
		goto out;

	/*
	 * Check if there is some access
	 * error this access may hook on
	 */
	if (hw->mac.type == e1000_i350) {
		u32 eecd = E1000_READ_REG(hw, E1000_EECD);
		if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT |
		    E1000_EECD_TIMEOUT)) {
			/* Clear all access error flags */
			E1000_WRITE_REG(hw, E1000_EECD, eecd |
					E1000_EECD_ERROR_CLR);
			DEBUGOUT("Nvm bit banging access error"
				" detected and cleared.\n");
		}
	}
	/* 82580 only exposes the BLOCKED flag, cleared by writing it back. */
	if (hw->mac.type == e1000_82580) {
		u32 eecd = E1000_READ_REG(hw, E1000_EECD);
		if (eecd & E1000_EECD_BLOCKED) {
			/* Clear access error flag */
			E1000_WRITE_REG(hw, E1000_EECD, eecd |
					E1000_EECD_BLOCKED);
			DEBUGOUT("Nvm bit banging access"
				" error detected and cleared.\n");
		}
	}

	/* Request/grant handshake; drop the semaphore if it fails. */
	ret_val = e1000_acquire_nvm_generic(hw);
	if (ret_val)
		e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);

out:
	return ret_val;
}
926 
927 /**
928  *  e1000_release_nvm_82575 - Release exclusive access to EEPROM
929  *  @hw: pointer to the HW structure
930  *
931  *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
932  *  then release the semaphores acquired.
933  **/
934 static void e1000_release_nvm_82575(struct e1000_hw *hw)
935 {
936 	DEBUGFUNC("e1000_release_nvm_82575");
937 
938 	e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
939 }
940 
941 /**
942  *  e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
943  *  @hw: pointer to the HW structure
944  *  @mask: specifies which semaphore to acquire
945  *
946  *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
947  *  will also specify which port we're acquiring the lock for.
948  **/
949 static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
950 {
951 	u32 swfw_sync;
952 	u32 swmask = mask;
953 	u32 fwmask = mask << 16;
954 	s32 ret_val = E1000_SUCCESS;
955 	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
956 
957 	DEBUGFUNC("e1000_acquire_swfw_sync_82575");
958 
959 	while (i < timeout) {
960 		if (e1000_get_hw_semaphore_generic(hw)) {
961 			ret_val = -E1000_ERR_SWFW_SYNC;
962 			goto out;
963 		}
964 
965 		swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
966 		if (!(swfw_sync & (fwmask | swmask)))
967 			break;
968 
969 		/*
970 		 * Firmware currently using resource (fwmask)
971 		 * or other software thread using resource (swmask)
972 		 */
973 		e1000_put_hw_semaphore_generic(hw);
974 		msec_delay_irq(5);
975 		i++;
976 	}
977 
978 	if (i == timeout) {
979 		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
980 		ret_val = -E1000_ERR_SWFW_SYNC;
981 		goto out;
982 	}
983 
984 	swfw_sync |= swmask;
985 	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
986 
987 	e1000_put_hw_semaphore_generic(hw);
988 
989 out:
990 	return ret_val;
991 }
992 
993 /**
994  *  e1000_release_swfw_sync_82575 - Release SW/FW semaphore
995  *  @hw: pointer to the HW structure
996  *  @mask: specifies which semaphore to acquire
997  *
998  *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
999  *  will also specify which port we're releasing the lock for.
1000  **/
1001 static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
1002 {
1003 	u32 swfw_sync;
1004 
1005 	DEBUGFUNC("e1000_release_swfw_sync_82575");
1006 
1007 	while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS);
1008 	/* Empty */
1009 
1010 	swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
1011 	swfw_sync &= ~mask;
1012 	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
1013 
1014 	e1000_put_hw_semaphore_generic(hw);
1015 }
1016 
1017 /**
1018  *  e1000_get_cfg_done_82575 - Read config done bit
1019  *  @hw: pointer to the HW structure
1020  *
1021  *  Read the management control register for the config done bit for
1022  *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
1023  *  to read the config done bit, so an error is *ONLY* logged and returns
1024  *  E1000_SUCCESS.  If we were to return with error, EEPROM-less silicon
1025  *  would not be able to be reset or change link.
1026  **/
1027 static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
1028 {
1029 	s32 timeout = PHY_CFG_TIMEOUT;
1030 	s32 ret_val = E1000_SUCCESS;
1031 	u32 mask = E1000_NVM_CFG_DONE_PORT_0;
1032 
1033 	DEBUGFUNC("e1000_get_cfg_done_82575");
1034 
1035 	if (hw->bus.func == E1000_FUNC_1)
1036 		mask = E1000_NVM_CFG_DONE_PORT_1;
1037 	else if (hw->bus.func == E1000_FUNC_2)
1038 		mask = E1000_NVM_CFG_DONE_PORT_2;
1039 	else if (hw->bus.func == E1000_FUNC_3)
1040 		mask = E1000_NVM_CFG_DONE_PORT_3;
1041 	while (timeout) {
1042 		if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
1043 			break;
1044 		msec_delay(1);
1045 		timeout--;
1046 	}
1047 	if (!timeout)
1048 		DEBUGOUT("MNG configuration cycle has not completed.\n");
1049 
1050 	/* If EEPROM is not marked present, init the PHY manually */
1051 	if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
1052 	    (hw->phy.type == e1000_phy_igp_3))
1053 		e1000_phy_init_script_igp3(hw);
1054 
1055 	return ret_val;
1056 }
1057 
1058 /**
1059  *  e1000_get_link_up_info_82575 - Get link speed/duplex info
1060  *  @hw: pointer to the HW structure
1061  *  @speed: stores the current speed
1062  *  @duplex: stores the current duplex
1063  *
1064  *  This is a wrapper function, if using the serial gigabit media independent
1065  *  interface, use PCS to retrieve the link speed and duplex information.
1066  *  Otherwise, use the generic function to get the link speed and duplex info.
1067  **/
1068 static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
1069                                         u16 *duplex)
1070 {
1071 	s32 ret_val;
1072 
1073 	DEBUGFUNC("e1000_get_link_up_info_82575");
1074 
1075 	if (hw->phy.media_type != e1000_media_type_copper)
1076 		ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed,
1077 		                                               duplex);
1078 	else
1079 		ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
1080 		                                                    duplex);
1081 
1082 	return ret_val;
1083 }
1084 
1085 /**
1086  *  e1000_check_for_link_82575 - Check for link
1087  *  @hw: pointer to the HW structure
1088  *
1089  *  If sgmii is enabled, then use the pcs register to determine link, otherwise
1090  *  use the generic interface for determining link.
1091  **/
1092 static s32 e1000_check_for_link_82575(struct e1000_hw *hw)
1093 {
1094 	s32 ret_val;
1095 	u16 speed, duplex;
1096 
1097 	DEBUGFUNC("e1000_check_for_link_82575");
1098 
1099 	if (hw->phy.media_type != e1000_media_type_copper) {
1100 		ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed,
1101 		                                               &duplex);
1102 		/*
1103 		 * Use this flag to determine if link needs to be checked or
1104 		 * not.  If we have link clear the flag so that we do not
1105 		 * continue to check for link.
1106 		 */
1107 		hw->mac.get_link_status = !hw->mac.serdes_has_link;
1108 	} else {
1109 		ret_val = e1000_check_for_copper_link_generic(hw);
1110 	}
1111 
1112 	return ret_val;
1113 }
1114 
1115 /**
1116  *  e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown
1117  *  @hw: pointer to the HW structure
1118  **/
static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw)
{
	u32 reg;

	DEBUGFUNC("e1000_power_up_serdes_link_82575");

	/* Only applies to internal serdes or SGMII-backed links */
	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !e1000_sgmii_active_82575(hw))
		return;

	/* Enable PCS to turn on link */
	reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
	reg |= E1000_PCS_CFG_PCS_EN;
	E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);

	/* Power up the laser (SDP3 cleared; the shutdown path sets it) */
	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	reg &= ~E1000_CTRL_EXT_SDP3_DATA;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);

	/* flush the write to verify completion */
	E1000_WRITE_FLUSH(hw);
	msec_delay(1);
}
1143 
1144 /**
1145  *  e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
1146  *  @hw: pointer to the HW structure
1147  *  @speed: stores the current speed
1148  *  @duplex: stores the current duplex
1149  *
1150  *  Using the physical coding sub-layer (PCS), retrieve the current speed and
1151  *  duplex, then store the values in the pointers provided.
1152  **/
1153 static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
1154                                                 u16 *speed, u16 *duplex)
1155 {
1156 	struct e1000_mac_info *mac = &hw->mac;
1157 	u32 pcs;
1158 
1159 	DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575");
1160 
1161 	/* Set up defaults for the return values of this function */
1162 	mac->serdes_has_link = FALSE;
1163 	*speed = 0;
1164 	*duplex = 0;
1165 
1166 	/*
1167 	 * Read the PCS Status register for link state. For non-copper mode,
1168 	 * the status register is not accurate. The PCS status register is
1169 	 * used instead.
1170 	 */
1171 	pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT);
1172 
1173 	/*
1174 	 * The link up bit determines when link is up on autoneg. The sync ok
1175 	 * gets set once both sides sync up and agree upon link. Stable link
1176 	 * can be determined by checking for both link up and link sync ok
1177 	 */
1178 	if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
1179 		mac->serdes_has_link = TRUE;
1180 
1181 		/* Detect and store PCS speed */
1182 		if (pcs & E1000_PCS_LSTS_SPEED_1000) {
1183 			*speed = SPEED_1000;
1184 		} else if (pcs & E1000_PCS_LSTS_SPEED_100) {
1185 			*speed = SPEED_100;
1186 		} else {
1187 			*speed = SPEED_10;
1188 		}
1189 
1190 		/* Detect and store PCS duplex */
1191 		if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
1192 			*duplex = FULL_DUPLEX;
1193 		} else {
1194 			*duplex = HALF_DUPLEX;
1195 		}
1196 	}
1197 
1198 	return E1000_SUCCESS;
1199 }
1200 
1201 /**
1202  *  e1000_shutdown_serdes_link_82575 - Remove link during power down
1203  *  @hw: pointer to the HW structure
1204  *
1205  *  In the case of serdes shut down sfp and PCS on driver unload
1206  *  when management pass thru is not enabled.
1207  **/
void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw)
{
	u32 reg;

	DEBUGFUNC("e1000_shutdown_serdes_link_82575");

	/* Only applies to internal serdes or SGMII-backed links */
	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !e1000_sgmii_active_82575(hw))
		return;

	/* Leave link up if management pass-through still needs it */
	if (!e1000_enable_mng_pass_thru(hw)) {
		/* Disable PCS to turn off link */
		reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
		reg &= ~E1000_PCS_CFG_PCS_EN;
		E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);

		/* shutdown the laser (SDP3 set; power-up clears it) */
		reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		reg |= E1000_CTRL_EXT_SDP3_DATA;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);

		/* flush the write to verify completion */
		E1000_WRITE_FLUSH(hw);
		msec_delay(1);
	}

	return;
}
1236 
1237 /**
1238  *  e1000_reset_hw_82575 - Reset hardware
1239  *  @hw: pointer to the HW structure
1240  *
1241  *  This resets the hardware into a known state.
1242  **/
static s32 e1000_reset_hw_82575(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	DEBUGFUNC("e1000_reset_hw_82575");

	/*
	 * Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = e1000_disable_pcie_master_generic(hw);
	if (ret_val) {
		/* non-fatal: proceed with the reset anyway */
		DEBUGOUT("PCI-E Master disable polling has failed.\n");
	}

	/* set the completion timeout for interface */
	ret_val = e1000_set_pcie_completion_timeout(hw);
	if (ret_val) {
		/* non-fatal: proceed with the reset anyway */
		DEBUGOUT("PCI-E Set completion timeout has failed.\n");
	}

	DEBUGOUT("Masking off all interrupts\n");
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);

	/* Disable Rx (RCTL=0) and Tx (TCTL keeps only PSP, EN cleared) */
	E1000_WRITE_REG(hw, E1000_RCTL, 0);
	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
	E1000_WRITE_FLUSH(hw);

	/* allow pending DMA to drain before the global reset */
	msec_delay(10);

	ctrl = E1000_READ_REG(hw, E1000_CTRL);

	DEBUGOUT("Issuing a global reset to MAC\n");
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);

	ret_val = e1000_get_auto_rd_done_generic(hw);
	if (ret_val) {
		/*
		 * When auto config read does not complete, do not
		 * return with an error. This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		DEBUGOUT("Auto Read Done did not complete\n");
	}

	/* If EEPROM is not present, run manual init scripts */
	if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
		e1000_reset_init_script_82575(hw);

	/* Clear any pending interrupt events. */
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
	E1000_READ_REG(hw, E1000_ICR);

	/* Install any alternate MAC address into RAR0 */
	ret_val = e1000_check_alt_mac_addr_generic(hw);

	return ret_val;
}
1302 
1303 /**
1304  *  e1000_init_hw_82575 - Initialize hardware
1305  *  @hw: pointer to the HW structure
1306  *
1307  *  This inits the hardware readying it for operation.
1308  **/
static s32 e1000_init_hw_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	u16 i, rar_count = mac->rar_entry_count;

	DEBUGFUNC("e1000_init_hw_82575");

	/* Initialize identification LED */
	ret_val = mac->ops.id_led_init(hw);
	if (ret_val) {
		DEBUGOUT("Error initializing identification LED\n");
		/* This is not fatal and we should not stop init due to this */
	}

	/* Disabling VLAN filtering */
	DEBUGOUT("Initializing the IEEE VLAN\n");
	mac->ops.clear_vfta(hw);

	/* Setup the receive address */
	e1000_init_rx_addrs_generic(hw, rar_count);

	/* Zero out the Multicast HASH table */
	DEBUGOUT("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);

	/* Zero out the Unicast HASH table */
	DEBUGOUT("Zeroing the UTA\n");
	for (i = 0; i < mac->uta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0);

	/* Setup link and flow control; its status is what we return */
	ret_val = mac->ops.setup_link(hw);

	/*
	 * Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	e1000_clear_hw_cntrs_82575(hw);

	return ret_val;
}
1354 
1355 /**
1356  *  e1000_setup_copper_link_82575 - Configure copper link settings
1357  *  @hw: pointer to the HW structure
1358  *
1359  *  Configures the link for auto-neg or forced speed and duplex.  Then we check
1360  *  for link, once link is established calls to configure collision distance
1361  *  and flow control are called.
1362  **/
static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
{
	u32 ctrl;
	s32  ret_val;

	DEBUGFUNC("e1000_setup_copper_link_82575");

	/* Set link up; let the PHY (not the MAC) drive speed/duplex */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);

	/* Configure the PCS; also used for SGMII-attached copper PHYs */
	ret_val = e1000_setup_serdes_link_82575(hw);
	if (ret_val)
		goto out;

	if (e1000_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
		/* allow time for SFP cage time to power up phy */
		msec_delay(300);

		ret_val = hw->phy.ops.reset(hw);
		if (ret_val) {
			DEBUGOUT("Error resetting the PHY.\n");
			goto out;
		}
	}
	/* Dispatch to the PHY-specific copper setup routine */
	switch (hw->phy.type) {
	case e1000_phy_m88:
		/* These Marvell PHY IDs require the gen2 setup path */
		if (hw->phy.id == I347AT4_E_PHY_ID ||
		    hw->phy.id == M88E1112_E_PHY_ID ||
		    hw->phy.id == M88E1340M_E_PHY_ID)
			ret_val = e1000_copper_link_setup_m88_gen2(hw);
		else
			ret_val = e1000_copper_link_setup_m88(hw);
		break;
	case e1000_phy_igp_3:
		ret_val = e1000_copper_link_setup_igp(hw);
		break;
	case e1000_phy_82580:
		ret_val = e1000_copper_link_setup_82577(hw);
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

	if (ret_val)
		goto out;

	/* Finish with the generic copper link bring-up */
	ret_val = e1000_setup_copper_link_generic(hw);
out:
	return ret_val;
}
1416 
1417 /**
1418  *  e1000_setup_serdes_link_82575 - Setup link for serdes
1419  *  @hw: pointer to the HW structure
1420  *
1421  *  Configure the physical coding sub-layer (PCS) link.  The PCS link is
1422  *  used on copper connections where the serialized gigabit media independent
1423  *  interface (sgmii), or serdes fiber is being used.  Configures the link
1424  *  for auto-negotiation or forces speed/duplex.
1425  **/
static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
{
	u32 ctrl_ext, ctrl_reg, reg;
	bool pcs_autoneg;

	DEBUGFUNC("e1000_setup_serdes_link_82575");

	/* Nothing to do unless the link is serdes or SGMII based */
	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !e1000_sgmii_active_82575(hw))
		return E1000_SUCCESS;

	/*
	 * On the 82575, SerDes loopback mode persists until it is
	 * explicitly turned off or a power cycle is performed.  A read to
	 * the register does not indicate its status.  Therefore, we ensure
	 * loopback mode is disabled during initialization.
	 */
	E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);

	/* power on the sfp cage if present */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);

	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
	ctrl_reg |= E1000_CTRL_SLU;

	/* set both sw defined pins on 82575/82576*/
	if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576)
		ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;

	reg = E1000_READ_REG(hw, E1000_PCS_LCTL);

	/* default pcs_autoneg to the same setting as mac autoneg */
	pcs_autoneg = hw->mac.autoneg;

	/* Link-mode-specific adjustments to CTRL and PCS_LCTL */
	switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
	case E1000_CTRL_EXT_LINK_MODE_SGMII:
		/* sgmii mode lets the phy handle forcing speed/duplex */
		pcs_autoneg = TRUE;
		/* autoneg time out should be disabled for SGMII mode */
		reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
		break;
	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
		/* disable PCS autoneg and support parallel detect only */
		pcs_autoneg = FALSE;
		/* fall through to default case */
	default:
		/*
		 * non-SGMII modes only supports a speed of 1000/Full for the
		 * link so it is best to just force the MAC and let the pcs
		 * link either autoneg or be forced to 1000/Full
		 */
		ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
		            E1000_CTRL_FD | E1000_CTRL_FRCDPX;

		/* set speed of 1000/Full if speed/duplex is forced */
		reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
		break;
	}

	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);

	/*
	 * New SerDes mode allows for forcing speed or autonegotiating speed
	 * at 1gb. Autoneg should be default set by most drivers. This is the
	 * mode that will be compatible with older link partners and switches.
	 * However, both are supported by the hardware and some drivers/tools.
	 */
	reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
	         E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);

	/*
	 * We force flow control to prevent the CTRL register values from being
	 * overwritten by the autonegotiated flow control values
	 */
	reg |= E1000_PCS_LCTL_FORCE_FCTRL;

	if (pcs_autoneg) {
		/* Set PCS register for autoneg */
		reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
		       E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
		DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
	} else {
		/* Set PCS register for forced link */
		reg |= E1000_PCS_LCTL_FSD;        /* Force Speed */
		DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
	}

	E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);

	/* Non-SGMII links apply the forced flow-control settings now */
	if (!e1000_sgmii_active_82575(hw))
		e1000_force_mac_fc_generic(hw);

	return E1000_SUCCESS;
}
1522 
1523 /**
1524  *  e1000_valid_led_default_82575 - Verify a valid default LED config
1525  *  @hw: pointer to the HW structure
1526  *  @data: pointer to the NVM (EEPROM)
1527  *
1528  *  Read the EEPROM for the current default LED configuration.  If the
1529  *  LED configuration is not valid, set to a valid LED configuration.
1530  **/
1531 static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data)
1532 {
1533 	s32 ret_val;
1534 
1535 	DEBUGFUNC("e1000_valid_led_default_82575");
1536 
1537 	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1538 	if (ret_val) {
1539 		DEBUGOUT("NVM Read Error\n");
1540 		goto out;
1541 	}
1542 
1543 	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
1544 		switch(hw->phy.media_type) {
1545 		case e1000_media_type_internal_serdes:
1546 			*data = ID_LED_DEFAULT_82575_SERDES;
1547 			break;
1548 		case e1000_media_type_copper:
1549 		default:
1550 			*data = ID_LED_DEFAULT;
1551 			break;
1552 		}
1553 	}
1554 out:
1555 	return ret_val;
1556 }
1557 
1558 /**
1559  *  e1000_sgmii_active_82575 - Return sgmii state
1560  *  @hw: pointer to the HW structure
1561  *
1562  *  82575 silicon has a serialized gigabit media independent interface (sgmii)
1563  *  which can be enabled for use in the embedded applications.  Simply
1564  *  return the current state of the sgmii interface.
1565  **/
1566 static bool e1000_sgmii_active_82575(struct e1000_hw *hw)
1567 {
1568 	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
1569 	return dev_spec->sgmii_active;
1570 }
1571 
1572 /**
1573  *  e1000_reset_init_script_82575 - Inits HW defaults after reset
1574  *  @hw: pointer to the HW structure
1575  *
1576  *  Inits recommended HW defaults after a reset when there is no EEPROM
1577  *  detected. This is only for the 82575.
1578  **/
static s32 e1000_reset_init_script_82575(struct e1000_hw* hw)
{
	DEBUGFUNC("e1000_reset_init_script_82575");

	/*
	 * Only the 82575 needs this script; the address/value pairs below
	 * are opaque recommended defaults normally loaded from EEPROM.
	 */
	if (hw->mac.type == e1000_82575) {
		DEBUGOUT("Running reset init script for 82575\n");
		/* SerDes configuration via SERDESCTRL */
		e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C);
		e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78);
		e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23);
		e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15);

		/* CCM configuration via CCMCTL register */
		e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00);
		e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00);

		/* PCIe lanes configuration */
		e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC);
		e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF);
		e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05);
		e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81);

		/* PCIe PLL Configuration */
		e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47);
		e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00);
		e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00);
	}

	return E1000_SUCCESS;
}
1609 
1610 /**
1611  *  e1000_read_mac_addr_82575 - Read device MAC address
1612  *  @hw: pointer to the HW structure
1613  **/
1614 static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw)
1615 {
1616 	s32 ret_val = E1000_SUCCESS;
1617 
1618 	DEBUGFUNC("e1000_read_mac_addr_82575");
1619 
1620 	/*
1621 	 * If there's an alternate MAC address place it in RAR0
1622 	 * so that it will override the Si installed default perm
1623 	 * address.
1624 	 */
1625 	ret_val = e1000_check_alt_mac_addr_generic(hw);
1626 	if (ret_val)
1627 		goto out;
1628 
1629 	ret_val = e1000_read_mac_addr_generic(hw);
1630 
1631 out:
1632 	return ret_val;
1633 }
1634 
1635 /**
1636  *  e1000_config_collision_dist_82575 - Configure collision distance
1637  *  @hw: pointer to the HW structure
1638  *
1639  *  Configures the collision distance to the default value and is used
1640  *  during link setup.
1641  **/
static void e1000_config_collision_dist_82575(struct e1000_hw *hw)
{
	u32 tctl_ext;

	DEBUGFUNC("e1000_config_collision_dist_82575");

	tctl_ext = E1000_READ_REG(hw, E1000_TCTL_EXT);

	/* Replace the COLD field with the default collision distance */
	tctl_ext &= ~E1000_TCTL_EXT_COLD;
	tctl_ext |= E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT;

	E1000_WRITE_REG(hw, E1000_TCTL_EXT, tctl_ext);
	E1000_WRITE_FLUSH(hw);
}
1656 
1657 /**
1658  * e1000_power_down_phy_copper_82575 - Remove link during PHY power down
1659  * @hw: pointer to the HW structure
1660  *
1661  * In the case of a PHY power down to save power, or to turn off link during a
1662  * driver unload, or wake on lan is not enabled, remove the link.
1663  **/
1664 static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw)
1665 {
1666 	struct e1000_phy_info *phy = &hw->phy;
1667 
1668 	if (!(phy->ops.check_reset_block))
1669 		return;
1670 
1671 	/* If the management interface is not enabled, then power down */
1672 	if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw)))
1673 		e1000_power_down_phy_copper(hw);
1674 
1675 	return;
1676 }
1677 
1678 /**
1679  *  e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters
1680  *  @hw: pointer to the HW structure
1681  *
1682  *  Clears the hardware counters by reading the counter registers.
1683  **/
static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_clear_hw_cntrs_82575");

	/* Clear the counters shared with all e1000 parts first */
	e1000_clear_hw_cntrs_base_generic(hw);

	/* Remaining counters are clear-on-read; reading discards them */
	E1000_READ_REG(hw, E1000_PRC64);
	E1000_READ_REG(hw, E1000_PRC127);
	E1000_READ_REG(hw, E1000_PRC255);
	E1000_READ_REG(hw, E1000_PRC511);
	E1000_READ_REG(hw, E1000_PRC1023);
	E1000_READ_REG(hw, E1000_PRC1522);
	E1000_READ_REG(hw, E1000_PTC64);
	E1000_READ_REG(hw, E1000_PTC127);
	E1000_READ_REG(hw, E1000_PTC255);
	E1000_READ_REG(hw, E1000_PTC511);
	E1000_READ_REG(hw, E1000_PTC1023);
	E1000_READ_REG(hw, E1000_PTC1522);

	E1000_READ_REG(hw, E1000_ALGNERRC);
	E1000_READ_REG(hw, E1000_RXERRC);
	E1000_READ_REG(hw, E1000_TNCRS);
	E1000_READ_REG(hw, E1000_CEXTERR);
	E1000_READ_REG(hw, E1000_TSCTC);
	E1000_READ_REG(hw, E1000_TSCTFC);

	E1000_READ_REG(hw, E1000_MGTPRC);
	E1000_READ_REG(hw, E1000_MGTPDC);
	E1000_READ_REG(hw, E1000_MGTPTC);

	E1000_READ_REG(hw, E1000_IAC);
	E1000_READ_REG(hw, E1000_ICRXOC);

	E1000_READ_REG(hw, E1000_ICRXPTC);
	E1000_READ_REG(hw, E1000_ICRXATC);
	E1000_READ_REG(hw, E1000_ICTXPTC);
	E1000_READ_REG(hw, E1000_ICTXATC);
	E1000_READ_REG(hw, E1000_ICTXQEC);
	E1000_READ_REG(hw, E1000_ICTXQMTC);
	E1000_READ_REG(hw, E1000_ICRXDMTC);

	E1000_READ_REG(hw, E1000_CBTMPC);
	E1000_READ_REG(hw, E1000_HTDPMC);
	E1000_READ_REG(hw, E1000_CBRMPC);
	E1000_READ_REG(hw, E1000_RPTHC);
	E1000_READ_REG(hw, E1000_HGPTC);
	E1000_READ_REG(hw, E1000_HTCBDPC);
	E1000_READ_REG(hw, E1000_HGORCL);
	E1000_READ_REG(hw, E1000_HGORCH);
	E1000_READ_REG(hw, E1000_HGOTCL);
	E1000_READ_REG(hw, E1000_HGOTCH);
	E1000_READ_REG(hw, E1000_LENERRS);

	/* This register should not be read in copper configurations */
	if ((hw->phy.media_type == e1000_media_type_internal_serdes) ||
	    e1000_sgmii_active_82575(hw))
		E1000_READ_REG(hw, E1000_SCVPC);
}
1742 
1743 /**
1744  *  e1000_rx_fifo_flush_82575 - Clean rx fifo after Rx enable
1745  *  @hw: pointer to the HW structure
1746  *
1747  *  After rx enable if managability is enabled then there is likely some
1748  *  bad data at the start of the fifo and possibly in the DMA fifo.  This
1749  *  function clears the fifos and flushes any packets that came in as rx was
1750  *  being enabled.
1751  **/
void e1000_rx_fifo_flush_82575(struct e1000_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	/* NOTE(review): name mismatch vs function is historical upstream */
	DEBUGFUNC("e1000_rx_fifo_workaround_82575");
	/* Workaround applies only to 82575 with manageability Rx enabled */
	if (hw->mac.type != e1000_82575 ||
	    !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
		return;

	/* Disable all Rx queues */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
		E1000_WRITE_REG(hw, E1000_RXDCTL(i),
		                rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		msec_delay(1);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i));
		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		DEBUGOUT("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected.  Set enable and wait 2ms so that
	 * any packet that was coming in as RCTL.EN was set is flushed
	 */
	rfctl = E1000_READ_REG(hw, E1000_RFCTL);
	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);

	rlpml = E1000_READ_REG(hw, E1000_RLPML);
	E1000_WRITE_REG(hw, E1000_RLPML, 0);

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
	temp_rctl |= E1000_RCTL_LPE;

	E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
	E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
	E1000_WRITE_FLUSH(hw);
	msec_delay(2);

	/* Enable Rx queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
	E1000_WRITE_FLUSH(hw);

	E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);

	/* Flush receive errors generated by workaround */
	E1000_READ_REG(hw, E1000_ROC);
	E1000_READ_REG(hw, E1000_RNBC);
	E1000_READ_REG(hw, E1000_MPC);
}
1816 
1817 /**
1818  *  e1000_set_pcie_completion_timeout - set pci-e completion timeout
1819  *  @hw: pointer to the HW structure
1820  *
1821  *  The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
1822  *  however the hardware default for these parts is 500us to 1ms which is less
1823  *  than the 10ms recommended by the pci-e spec.  To address this we need to
1824  *  increase the value to either 10ms to 200ms for capability version 1 config,
1825  *  or 16ms to 55ms for version 2.
1826  **/
static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw)
{
	u32 gcr = E1000_READ_REG(hw, E1000_GCR);
	s32 ret_val = E1000_SUCCESS;
	u16 pcie_devctl2;

	/* only take action if timeout value is defaulted to 0 */
	if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
		goto out;

	/*
	 * if capababilities version is type 1 we can write the
	 * timeout of 10ms to 200ms through the GCR register
	 */
	if (!(gcr & E1000_GCR_CAP_VER2)) {
		gcr |= E1000_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/*
	 * for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
	                                  &pcie_devctl2);
	if (ret_val)
		goto out;

	pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;

	ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
	                                   &pcie_devctl2);
out:
	/*
	 * disable completion timeout resend; note that GCR is written on
	 * every path through this function (all early exits land here).
	 */
	gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;

	E1000_WRITE_REG(hw, E1000_GCR, gcr);
	return ret_val;
}
1867 
1868 /**
1869  *  e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
1870  *  @hw: pointer to the hardware struct
1871  *  @enable: state to enter, either enabled or disabled
1872  *  @pf: Physical Function pool - do not set anti-spoofing for the PF
1873  *
1874  *  enables/disables L2 switch anti-spoofing functionality.
1875  **/
1876 void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
1877 {
1878 	u32 dtxswc;
1879 
1880 	switch (hw->mac.type) {
1881 	case e1000_82576:
1882 		dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
1883 		if (enable) {
1884 			dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
1885 				   E1000_DTXSWC_VLAN_SPOOF_MASK);
1886 			/* The PF can spoof - it has to in order to
1887 			 * support emulation mode NICs */
1888 			dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
1889 		} else {
1890 			dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
1891 				    E1000_DTXSWC_VLAN_SPOOF_MASK);
1892 		}
1893 		E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
1894 		break;
1895 	case e1000_i350:
1896 		dtxswc = E1000_READ_REG(hw, E1000_TXSWC);
1897 		if (enable) {
1898 			dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
1899 				   E1000_DTXSWC_VLAN_SPOOF_MASK);
1900 			/* The PF can spoof - it has to in order to
1901 			 * support emulation mode NICs
1902 			 */
1903 			dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
1904 		} else {
1905 			dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
1906 				    E1000_DTXSWC_VLAN_SPOOF_MASK);
1907 		}
1908 		E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc);
1909 	default:
1910 		break;
1911 	}
1912 }
1913 
1914 /**
1915  *  e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback
1916  *  @hw: pointer to the hardware struct
1917  *  @enable: state to enter, either enabled or disabled
1918  *
1919  *  enables/disables L2 switch loopback functionality.
1920  **/
1921 void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
1922 {
1923 	u32 dtxswc;
1924 
1925 	switch (hw->mac.type) {
1926 	case e1000_82576:
1927 		dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
1928 		if (enable)
1929 			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1930 		else
1931 			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1932 		E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
1933 		break;
1934 	case e1000_i350:
1935 		dtxswc = E1000_READ_REG(hw, E1000_TXSWC);
1936 		if (enable)
1937 			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1938 		else
1939 			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1940 		E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc);
1941 		break;
1942 	default:
1943 		/* Currently no other hardware supports loopback */
1944 		break;
1945 	}
1946 
1947 
1948 }
1949 
1950 /**
1951  *  e1000_vmdq_set_replication_pf - enable or disable vmdq replication
1952  *  @hw: pointer to the hardware struct
1953  *  @enable: state to enter, either enabled or disabled
1954  *
1955  *  enables/disables replication of packets across multiple pools.
1956  **/
1957 void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
1958 {
1959 	u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1960 
1961 	if (enable)
1962 		vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
1963 	else
1964 		vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
1965 
1966 	E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1967 }
1968 
1969 /**
1970  *  e1000_read_phy_reg_82580 - Read 82580 MDI control register
1971  *  @hw: pointer to the HW structure
1972  *  @offset: register offset to be read
1973  *  @data: pointer to the read data
1974  *
1975  *  Reads the MDI control register in the PHY at offset and stores the
1976  *  information read to data.
1977  **/
1978 static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
1979 {
1980 	s32 ret_val;
1981 
1982 	DEBUGFUNC("e1000_read_phy_reg_82580");
1983 
1984 	ret_val = hw->phy.ops.acquire(hw);
1985 	if (ret_val)
1986 		goto out;
1987 
1988 	ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
1989 
1990 	hw->phy.ops.release(hw);
1991 
1992 out:
1993 	return ret_val;
1994 }
1995 
1996 /**
1997  *  e1000_write_phy_reg_82580 - Write 82580 MDI control register
1998  *  @hw: pointer to the HW structure
1999  *  @offset: register offset to write to
2000  *  @data: data to write to register at offset
2001  *
2002  *  Writes data to MDI control register in the PHY at offset.
2003  **/
2004 static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
2005 {
2006 	s32 ret_val;
2007 
2008 	DEBUGFUNC("e1000_write_phy_reg_82580");
2009 
2010 	ret_val = hw->phy.ops.acquire(hw);
2011 	if (ret_val)
2012 		goto out;
2013 
2014 	ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
2015 
2016 	hw->phy.ops.release(hw);
2017 
2018 out:
2019 	return ret_val;
2020 }
2021 
2022 /**
2023  *  e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
2024  *  @hw: pointer to the HW structure
2025  *
2026  *  This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
2027  *  the values found in the EEPROM.  This addresses an issue in which these
2028  *  bits are not restored from EEPROM after reset.
2029  **/
2030 static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw)
2031 {
2032 	s32 ret_val = E1000_SUCCESS;
2033 	u32 mdicnfg;
2034 	u16 nvm_data = 0;
2035 
2036 	DEBUGFUNC("e1000_reset_mdicnfg_82580");
2037 
2038 	if (hw->mac.type != e1000_82580)
2039 		goto out;
2040 	if (!e1000_sgmii_active_82575(hw))
2041 		goto out;
2042 
2043 	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2044 	                           NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2045 	                           &nvm_data);
2046 	if (ret_val) {
2047 		DEBUGOUT("NVM Read Error\n");
2048 		goto out;
2049 	}
2050 
2051 	mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
2052 	if (nvm_data & NVM_WORD24_EXT_MDIO)
2053 		mdicnfg |= E1000_MDICNFG_EXT_MDIO;
2054 	if (nvm_data & NVM_WORD24_COM_MDIO)
2055 		mdicnfg |= E1000_MDICNFG_COM_MDIO;
2056 	E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
2057 out:
2058 	return ret_val;
2059 }
2060 
/**
 *  e1000_reset_hw_82580 - Reset hardware
 *  @hw: pointer to the HW structure
 *
 *  This resets function or entire device (all ports, etc.)
 *  to a known state.
 **/
static s32 e1000_reset_hw_82580(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	/* BH SW mailbox bit in SW_FW_SYNC */
	u16 swmbsw_mask = E1000_SW_SYNCH_MB;
	u32 ctrl;
	bool global_device_reset = hw->dev_spec._82575.global_device_reset;

	DEBUGFUNC("e1000_reset_hw_82580");

	/* The global reset request is one-shot: consume it now */
	hw->dev_spec._82575.global_device_reset = FALSE;

	/* Get current control state. */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);

	/*
	 * Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = e1000_disable_pcie_master_generic(hw);
	if (ret_val)
		DEBUGOUT("PCI-E Master disable polling has failed.\n");

	/* Quiesce the device: mask interrupts, stop Rx and Tx */
	DEBUGOUT("Masking off all interrupts\n");
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
	E1000_WRITE_REG(hw, E1000_RCTL, 0);
	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
	E1000_WRITE_FLUSH(hw);

	msec_delay(10);

	/* Determine whether or not a global dev reset is requested */
	/* A global reset needs the SW/FW semaphore; fall back to a port
	 * reset if it cannot be acquired.
	 */
	if (global_device_reset &&
		e1000_acquire_swfw_sync_82575(hw, swmbsw_mask))
			global_device_reset = FALSE;

	/* Skip the global reset if another function already issued one
	 * (DEV_RST_SET still pending in STATUS).
	 */
	if (global_device_reset &&
		!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STAT_DEV_RST_SET))
		ctrl |= E1000_CTRL_DEV_RST;
	else
		ctrl |= E1000_CTRL_RST;

	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);

	/* Add delay to insure DEV_RST has time to complete */
	if (global_device_reset)
		msec_delay(5);

	ret_val = e1000_get_auto_rd_done_generic(hw);
	if (ret_val) {
		/*
		 * When auto config read does not complete, do not
		 * return with an error. This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		DEBUGOUT("Auto Read Done did not complete\n");
	}

	/* If EEPROM is not present, run manual init scripts */
	if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
		e1000_reset_init_script_82575(hw);

	/* clear global device reset status bit (write-1-to-clear) */
	E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET);

	/* Clear any pending interrupt events. */
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
	E1000_READ_REG(hw, E1000_ICR);

	/* Reset clears MDICNFG routing bits; restore them from EEPROM */
	ret_val = e1000_reset_mdicnfg_82580(hw);
	if (ret_val)
		DEBUGOUT("Could not reset MDICNFG based on EEPROM\n");

	/* Install any alternate MAC address into RAR0 */
	ret_val = e1000_check_alt_mac_addr_generic(hw);

	/* Release semaphore */
	if (global_device_reset)
		e1000_release_swfw_sync_82575(hw, swmbsw_mask);

	return ret_val;
}
2150 
2151 /**
2152  *  e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual Rx PBA size
2153  *  @data: data received by reading RXPBS register
2154  *
2155  *  The 82580 uses a table based approach for packet buffer allocation sizes.
2156  *  This function converts the retrieved value into the correct table value
2157  *     0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
2158  *  0x0 36  72 144   1   2   4   8  16
2159  *  0x8 35  70 140 rsv rsv rsv rsv rsv
2160  */
2161 u16 e1000_rxpbs_adjust_82580(u32 data)
2162 {
2163 	u16 ret_val = 0;
2164 
2165 	if (data < E1000_82580_RXPBS_TABLE_SIZE)
2166 		ret_val = e1000_82580_rxpbs_table[data];
2167 
2168 	return ret_val;
2169 }
2170 
2171 /**
2172  *  e1000_validate_nvm_checksum_with_offset - Validate EEPROM
2173  *  checksum
2174  *  @hw: pointer to the HW structure
2175  *  @offset: offset in words of the checksum protected region
2176  *
2177  *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
2178  *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
2179  **/
2180 s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
2181 {
2182 	s32 ret_val = E1000_SUCCESS;
2183 	u16 checksum = 0;
2184 	u16 i, nvm_data;
2185 
2186 	DEBUGFUNC("e1000_validate_nvm_checksum_with_offset");
2187 
2188 	for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
2189 		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2190 		if (ret_val) {
2191 			DEBUGOUT("NVM Read Error\n");
2192 			goto out;
2193 		}
2194 		checksum += nvm_data;
2195 	}
2196 
2197 	if (checksum != (u16) NVM_SUM) {
2198 		DEBUGOUT("NVM Checksum Invalid\n");
2199 		ret_val = -E1000_ERR_NVM;
2200 		goto out;
2201 	}
2202 
2203 out:
2204 	return ret_val;
2205 }
2206 
2207 /**
2208  *  e1000_update_nvm_checksum_with_offset - Update EEPROM
2209  *  checksum
2210  *  @hw: pointer to the HW structure
2211  *  @offset: offset in words of the checksum protected region
2212  *
2213  *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
2214  *  up to the checksum.  Then calculates the EEPROM checksum and writes the
2215  *  value to the EEPROM.
2216  **/
2217 s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
2218 {
2219 	s32 ret_val;
2220 	u16 checksum = 0;
2221 	u16 i, nvm_data;
2222 
2223 	DEBUGFUNC("e1000_update_nvm_checksum_with_offset");
2224 
2225 	for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
2226 		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2227 		if (ret_val) {
2228 			DEBUGOUT("NVM Read Error while updating checksum.\n");
2229 			goto out;
2230 		}
2231 		checksum += nvm_data;
2232 	}
2233 	checksum = (u16) NVM_SUM - checksum;
2234 	ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
2235 				&checksum);
2236 	if (ret_val)
2237 		DEBUGOUT("NVM Write Error while updating checksum.\n");
2238 
2239 out:
2240 	return ret_val;
2241 }
2242 
2243 /**
2244  *  e1000_validate_nvm_checksum_82580 - Validate EEPROM checksum
2245  *  @hw: pointer to the HW structure
2246  *
2247  *  Calculates the EEPROM section checksum by reading/adding each word of
2248  *  the EEPROM and then verifies that the sum of the EEPROM is
2249  *  equal to 0xBABA.
2250  **/
2251 static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw)
2252 {
2253 	s32 ret_val = E1000_SUCCESS;
2254 	u16 eeprom_regions_count = 1;
2255 	u16 j, nvm_data;
2256 	u16 nvm_offset;
2257 
2258 	DEBUGFUNC("e1000_validate_nvm_checksum_82580");
2259 
2260 	ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2261 	if (ret_val) {
2262 		DEBUGOUT("NVM Read Error\n");
2263 		goto out;
2264 	}
2265 
2266 	if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
2267 		/* if chekcsums compatibility bit is set validate checksums
2268 		 * for all 4 ports. */
2269 		eeprom_regions_count = 4;
2270 	}
2271 
2272 	for (j = 0; j < eeprom_regions_count; j++) {
2273 		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2274 		ret_val = e1000_validate_nvm_checksum_with_offset(hw,
2275 								nvm_offset);
2276 		if (ret_val != E1000_SUCCESS)
2277 			goto out;
2278 	}
2279 
2280 out:
2281 	return ret_val;
2282 }
2283 
2284 /**
2285  *  e1000_update_nvm_checksum_82580 - Update EEPROM checksum
2286  *  @hw: pointer to the HW structure
2287  *
2288  *  Updates the EEPROM section checksums for all 4 ports by reading/adding
2289  *  each word of the EEPROM up to the checksum.  Then calculates the EEPROM
2290  *  checksum and writes the value to the EEPROM.
2291  **/
2292 static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw)
2293 {
2294 	s32 ret_val;
2295 	u16 j, nvm_data;
2296 	u16 nvm_offset;
2297 
2298 	DEBUGFUNC("e1000_update_nvm_checksum_82580");
2299 
2300 	ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2301 	if (ret_val) {
2302 		DEBUGOUT("NVM Read Error while updating checksum"
2303 			" compatibility bit.\n");
2304 		goto out;
2305 	}
2306 
2307 	if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
2308 		/* set compatibility bit to validate checksums appropriately */
2309 		nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
2310 		ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
2311 					&nvm_data);
2312 		if (ret_val) {
2313 			DEBUGOUT("NVM Write Error while updating checksum"
2314 				" compatibility bit.\n");
2315 			goto out;
2316 		}
2317 	}
2318 
2319 	for (j = 0; j < 4; j++) {
2320 		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2321 		ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
2322 		if (ret_val) {
2323 			goto out;
2324 		}
2325 	}
2326 
2327 out:
2328 	return ret_val;
2329 }
2330 
2331 /**
2332  *  e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum
2333  *  @hw: pointer to the HW structure
2334  *
2335  *  Calculates the EEPROM section checksum by reading/adding each word of
2336  *  the EEPROM and then verifies that the sum of the EEPROM is
2337  *  equal to 0xBABA.
2338  **/
2339 static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw)
2340 {
2341 	s32 ret_val = E1000_SUCCESS;
2342 	u16 j;
2343 	u16 nvm_offset;
2344 
2345 	DEBUGFUNC("e1000_validate_nvm_checksum_i350");
2346 
2347 	for (j = 0; j < 4; j++) {
2348 		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2349 		ret_val = e1000_validate_nvm_checksum_with_offset(hw,
2350 								nvm_offset);
2351 		if (ret_val != E1000_SUCCESS)
2352 			goto out;
2353 	}
2354 
2355 out:
2356 	return ret_val;
2357 }
2358 
2359 /**
2360  *  e1000_update_nvm_checksum_i350 - Update EEPROM checksum
2361  *  @hw: pointer to the HW structure
2362  *
2363  *  Updates the EEPROM section checksums for all 4 ports by reading/adding
2364  *  each word of the EEPROM up to the checksum.  Then calculates the EEPROM
2365  *  checksum and writes the value to the EEPROM.
2366  **/
2367 static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw)
2368 {
2369 	s32 ret_val = E1000_SUCCESS;
2370 	u16 j;
2371 	u16 nvm_offset;
2372 
2373 	DEBUGFUNC("e1000_update_nvm_checksum_i350");
2374 
2375 	for (j = 0; j < 4; j++) {
2376 		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2377 		ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
2378 		if (ret_val != E1000_SUCCESS)
2379 			goto out;
2380 	}
2381 
2382 out:
2383 	return ret_val;
2384 }
2385 
2386 /**
2387  *  e1000_set_eee_i350 - Enable/disable EEE support
2388  *  @hw: pointer to the HW structure
2389  *
2390  *  Enable/disable EEE based on setting in dev_spec structure.
2391  *
2392  **/
2393 s32 e1000_set_eee_i350(struct e1000_hw *hw)
2394 {
2395 	s32 ret_val = E1000_SUCCESS;
2396 	u32 ipcnfg, eeer, ctrl_ext;
2397 
2398 	DEBUGFUNC("e1000_set_eee_i350");
2399 
2400 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2401 	if ((hw->mac.type != e1000_i350) ||
2402 	    (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK))
2403 		goto out;
2404 	ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG);
2405 	eeer = E1000_READ_REG(hw, E1000_EEER);
2406 
2407 	/* enable or disable per user setting */
2408 	if (!(hw->dev_spec._82575.eee_disable)) {
2409 		ipcnfg |= (E1000_IPCNFG_EEE_1G_AN |
2410 		           E1000_IPCNFG_EEE_100M_AN);
2411 		eeer |= (E1000_EEER_TX_LPI_EN |
2412 		         E1000_EEER_RX_LPI_EN |
2413 		         E1000_EEER_LPI_FC);
2414 
2415 	} else {
2416 		ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
2417 		            E1000_IPCNFG_EEE_100M_AN);
2418 		eeer &= ~(E1000_EEER_TX_LPI_EN |
2419 		          E1000_EEER_RX_LPI_EN |
2420 		          E1000_EEER_LPI_FC);
2421 	}
2422 	E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg);
2423 	E1000_WRITE_REG(hw, E1000_EEER, eeer);
2424 			E1000_READ_REG(hw, E1000_IPCNFG);
2425 			E1000_READ_REG(hw, E1000_EEER);
2426 out:
2427 
2428 	return ret_val;
2429 }
2430