xref: /dragonfly/sys/dev/netif/ig_hal/e1000_ich8lan.c (revision ff0a8843)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2014, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD:$*/
34 
35 /* 82562G 10/100 Network Connection
36  * 82562G-2 10/100 Network Connection
37  * 82562GT 10/100 Network Connection
38  * 82562GT-2 10/100 Network Connection
39  * 82562V 10/100 Network Connection
40  * 82562V-2 10/100 Network Connection
41  * 82566DC-2 Gigabit Network Connection
42  * 82566DC Gigabit Network Connection
43  * 82566DM-2 Gigabit Network Connection
44  * 82566DM Gigabit Network Connection
45  * 82566MC Gigabit Network Connection
46  * 82566MM Gigabit Network Connection
47  * 82567LM Gigabit Network Connection
48  * 82567LF Gigabit Network Connection
49  * 82567V Gigabit Network Connection
50  * 82567LM-2 Gigabit Network Connection
51  * 82567LF-2 Gigabit Network Connection
52  * 82567V-2 Gigabit Network Connection
53  * 82567LF-3 Gigabit Network Connection
54  * 82567LM-3 Gigabit Network Connection
55  * 82567LM-4 Gigabit Network Connection
56  * 82577LM Gigabit Network Connection
57  * 82577LC Gigabit Network Connection
58  * 82578DM Gigabit Network Connection
59  * 82578DC Gigabit Network Connection
60  * 82579LM Gigabit Network Connection
61  * 82579V Gigabit Network Connection
62  * Ethernet Connection I217-LM
63  * Ethernet Connection I217-V
64  * Ethernet Connection I218-V
65  * Ethernet Connection I218-LM
66  * Ethernet Connection (2) I218-LM
67  * Ethernet Connection (2) I218-V
68  * Ethernet Connection (3) I218-LM
69  * Ethernet Connection (3) I218-V
70  */
71 
72 #include "e1000_api.h"
73 
74 static s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
75 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
76 static s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
77 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
78 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
79 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
80 static int  e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
81 static int  e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
82 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
83 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
84 					      u8 *mc_addr_list,
85 					      u32 mc_addr_count);
86 static s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
87 static s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
88 static s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
89 static s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
90 					    bool active);
91 static s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
92 					    bool active);
93 static s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
94 				   u16 words, u16 *data);
95 static s32  e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset,
96 				   u16 words, u16 *data);
97 static s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
98 				    u16 words, u16 *data);
99 static s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
100 static s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
101 static s32  e1000_update_nvm_checksum_spt(struct e1000_hw *hw);
102 static s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
103 					    u16 *data);
104 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
105 static s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
106 static s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
107 static s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
108 static s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
109 static s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
110 static s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
111 static s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
112 					   u16 *speed, u16 *duplex);
113 static s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
114 static s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
115 static s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
116 static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
117 static s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
118 static s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
119 static s32  e1000_led_on_pchlan(struct e1000_hw *hw);
120 static s32  e1000_led_off_pchlan(struct e1000_hw *hw);
121 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
122 static s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
123 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
124 static s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
125 static s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
126 					  u32 offset, u8 *data);
127 static s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
128 					  u8 size, u16 *data);
129 static s32  e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
130 					  u32 *data);
131 static s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
132 					  u32 offset, u16 *data);
133 static s32  e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
134 					  u32 offset, u32 *data);
135 static s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
136 						 u32 offset, u8 byte);
137 static s32  e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
138 						 u32 offset, u32 dword);
139 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
140 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
141 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
142 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
143 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
144 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
145 static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr);
146 
147 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
148 /* Offset 04h HSFSTS */
149 union ich8_hws_flash_status {
150 	struct ich8_hsfsts {
151 		u16 flcdone:1; /* bit 0 Flash Cycle Done */
152 		u16 flcerr:1; /* bit 1 Flash Cycle Error */
153 		u16 dael:1; /* bit 2 Direct Access error Log */
154 		u16 berasesz:2; /* bit 4:3 Sector Erase Size */
155 		u16 flcinprog:1; /* bit 5 flash cycle in Progress */
156 		u16 reserved1:2; /* bit 7:6 Reserved */
157 		u16 reserved2:6; /* bit 13:8 Reserved */
158 		u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
159 		u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
160 	} hsf_status;
161 	u16 regval;
162 };
163 
164 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
165 /* Offset 06h FLCTL */
166 union ich8_hws_flash_ctrl {
167 	struct ich8_hsflctl {
168 		u16 flcgo:1;   /* 0 Flash Cycle Go */
169 		u16 flcycle:2;   /* 2:1 Flash Cycle */
170 		u16 reserved:5;   /* 7:3 Reserved  */
171 		u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
172 		u16 flockdn:6;   /* 15:10 Reserved */
173 	} hsf_ctrl;
174 	u16 regval;
175 };
176 
177 /* ICH Flash Region Access Permissions */
178 union ich8_hws_flash_regacc {
179 	struct ich8_flracc {
180 		u32 grra:8; /* 0:7 GbE region Read Access */
181 		u32 grwa:8; /* 8:15 GbE region Write Access */
182 		u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
183 		u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
184 	} hsf_flregacc;
185 	u32 regval;
186 };
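
/* A minimal usage sketch (illustrative only; the flash-cycle helpers later
 * in this file follow this same pattern): read the 16-bit HSFSTS register
 * once, inspect or set individual fields through the bitfield view, then
 * write the raw value back.  E1000_READ_FLASH_REG16/E1000_WRITE_FLASH_REG16
 * are the flash-register accessors assumed by this HAL.
 *
 *	union ich8_hws_flash_status hsfsts;
 *
 *	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
 *	if (!hsfsts.hsf_status.flcinprog) {
 *		hsfsts.hsf_status.flcdone = 1;	(write 1 to clear)
 *		hsfsts.hsf_status.flcerr = 1;
 *		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
 *	}
 */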
187 
188 /**
189  *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
190  *  @hw: pointer to the HW structure
191  *
192  *  Test access to the PHY registers by reading the PHY ID registers.  If
193  *  the PHY ID is already known (e.g. resume path) compare it with known ID,
194  *  otherwise assume the read PHY ID is correct if it is valid.
195  *
196  *  Assumes the sw/fw/hw semaphore is already acquired.
197  **/
198 static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
199 {
200 	u16 phy_reg = 0;
201 	u32 phy_id = 0;
202 	s32 ret_val = 0;
203 	u16 retry_count;
204 	u32 mac_reg = 0;
205 
206 	for (retry_count = 0; retry_count < 2; retry_count++) {
207 		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
208 		if (ret_val || (phy_reg == 0xFFFF))
209 			continue;
210 		phy_id = (u32)(phy_reg << 16);
211 
212 		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
213 		if (ret_val || (phy_reg == 0xFFFF)) {
214 			phy_id = 0;
215 			continue;
216 		}
217 		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
218 		break;
219 	}
220 
221 	if (hw->phy.id) {
222 		if  (hw->phy.id == phy_id)
223 			goto out;
224 	} else if (phy_id) {
225 		hw->phy.id = phy_id;
226 		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
227 		goto out;
228 	}
229 
230 	/* In case the PHY needs to be in mdio slow mode,
231 	 * set slow mode and try to get the PHY id again.
232 	 */
233 	if (hw->mac.type < e1000_pch_lpt) {
234 		hw->phy.ops.release(hw);
235 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
236 		if (!ret_val)
237 			ret_val = e1000_get_phy_id(hw);
238 		hw->phy.ops.acquire(hw);
239 	}
240 
241 	if (ret_val)
242 		return FALSE;
243 out:
244 	if (hw->mac.type == e1000_pch_lpt ||
245 	    hw->mac.type == e1000_pch_spt) {
246 		/* Unforce SMBus mode in PHY */
247 		hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
248 		phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
249 		hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
250 
251 		/* Unforce SMBus mode in MAC */
252 		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
253 		mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
254 		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
255 	}
256 
257 	return TRUE;
258 }
259 
260 /**
261  *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
262  *  @hw: pointer to the HW structure
263  *
264  *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
265  *  used to reset the PHY to a quiescent state when necessary.
266  **/
267 static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
268 {
269 	u32 mac_reg;
270 
271 	DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");
272 
273 	/* Set Phy Config Counter to 50msec */
274 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
275 	mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
276 	mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
277 	E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
278 
279 	/* Toggle LANPHYPC Value bit */
280 	mac_reg = E1000_READ_REG(hw, E1000_CTRL);
281 	mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
282 	mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
283 	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
284 	E1000_WRITE_FLUSH(hw);
285 	usec_delay(10);
286 	mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
287 	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
288 	E1000_WRITE_FLUSH(hw);
289 
290 	if (hw->mac.type < e1000_pch_lpt) {
291 		msec_delay(50);
292 	} else {
293 		u16 count = 20;
294 
295 		do {
296 			msec_delay(5);
297 		} while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
298 			   E1000_CTRL_EXT_LPCD) && count--);
299 
300 		msec_delay(30);
301 	}
302 }
303 
304 /**
305  *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
306  *  @hw: pointer to the HW structure
307  *
308  *  Workarounds/flow necessary for PHY initialization during driver load
309  *  and resume paths.
310  **/
311 static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
312 {
313 	u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
314 	s32 ret_val;
315 
316 	DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
317 
318 	/* Gate automatic PHY configuration by hardware on managed and
319 	 * non-managed 82579 and newer adapters.
320 	 */
321 	e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
322 
323 	/* It is not possible to be certain of the current state of ULP
324 	 * so forcibly disable it.
325 	 */
326 	hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
327 	e1000_disable_ulp_lpt_lp(hw, TRUE);
328 
329 	ret_val = hw->phy.ops.acquire(hw);
330 	if (ret_val) {
331 		DEBUGOUT("Failed to initialize PHY flow\n");
332 		goto out;
333 	}
334 
335 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
336 	 * inaccessible and resetting the PHY is not blocked, toggle the
337 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
338 	 */
339 	switch (hw->mac.type) {
340 	case e1000_pch_lpt:
341 	case e1000_pch_spt:
342 		if (e1000_phy_is_accessible_pchlan(hw))
343 			break;
344 
345 		/* Before toggling LANPHYPC, see if PHY is accessible by
346 		 * forcing MAC to SMBus mode first.
347 		 */
348 		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
349 		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
350 		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
351 
352 		/* Wait 50 milliseconds for the MAC to finish any retries
353 		 * that it might be performing from previous attempts to
354 		 * acknowledge PHY read requests.
355 		 */
356 		msec_delay(50);
357 
358 		/* fall-through */
359 	case e1000_pch2lan:
360 		if (e1000_phy_is_accessible_pchlan(hw))
361 			break;
362 
363 		/* fall-through */
364 	case e1000_pchlan:
365 		if ((hw->mac.type == e1000_pchlan) &&
366 		    (fwsm & E1000_ICH_FWSM_FW_VALID))
367 			break;
368 
369 		if (hw->phy.ops.check_reset_block(hw)) {
370 			DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
371 			ret_val = -E1000_ERR_PHY;
372 			break;
373 		}
374 
375 		/* Toggle LANPHYPC Value bit */
376 		e1000_toggle_lanphypc_pch_lpt(hw);
377 		if (hw->mac.type >= e1000_pch_lpt) {
378 			if (e1000_phy_is_accessible_pchlan(hw))
379 				break;
380 
381 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
382 			 * so ensure that the MAC is also out of SMBus mode
383 			 */
384 			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
385 			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
386 			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
387 
388 			if (e1000_phy_is_accessible_pchlan(hw))
389 				break;
390 
391 			ret_val = -E1000_ERR_PHY;
392 		}
393 		break;
394 	default:
395 		break;
396 	}
397 
398 	hw->phy.ops.release(hw);
399 	if (!ret_val) {
400 
401 		/* Check to see if able to reset PHY.  Print error if not */
402 		if (hw->phy.ops.check_reset_block(hw)) {
403 			ERROR_REPORT("Reset blocked by ME\n");
404 			goto out;
405 		}
406 
407 		/* Reset the PHY before any access to it.  Doing so ensures
408 		 * that the PHY is in a known good state before we read/write
409 		 * PHY registers.  The generic reset is sufficient here,
410 		 * because we haven't determined the PHY type yet.
411 		 */
412 		ret_val = e1000_phy_hw_reset_generic(hw);
413 		if (ret_val)
414 			goto out;
415 
416 		/* On a successful reset, possibly need to wait for the PHY
417 		 * to quiesce to an accessible state before returning control
418 		 * to the calling function.  If the PHY does not quiesce, then
419 		 * return E1000_BLK_PHY_RESET, as this is the condition that
420 		 * the PHY is in.
421 		 */
422 		ret_val = hw->phy.ops.check_reset_block(hw);
423 		if (ret_val)
424 			ERROR_REPORT("ME blocked access to PHY after reset\n");
425 	}
426 
427 out:
428 	/* Ungate automatic PHY configuration on non-managed 82579 */
429 	if ((hw->mac.type == e1000_pch2lan) &&
430 	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
431 		msec_delay(10);
432 		e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
433 	}
434 
435 	return ret_val;
436 }
437 
438 /**
439  *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
440  *  @hw: pointer to the HW structure
441  *
442  *  Initialize family-specific PHY parameters and function pointers.
443  **/
444 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
445 {
446 	struct e1000_phy_info *phy = &hw->phy;
447 	s32 ret_val;
448 
449 	DEBUGFUNC("e1000_init_phy_params_pchlan");
450 
451 	phy->addr		= 1;
452 	phy->reset_delay_us	= 100;
453 
454 	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
455 	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
456 	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
457 	phy->ops.set_page	= e1000_set_page_igp;
458 	phy->ops.read_reg	= e1000_read_phy_reg_hv;
459 	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
460 	phy->ops.read_reg_page	= e1000_read_phy_reg_page_hv;
461 	phy->ops.release	= e1000_release_swflag_ich8lan;
462 	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
463 	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
464 	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
465 	phy->ops.write_reg	= e1000_write_phy_reg_hv;
466 	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
467 	phy->ops.write_reg_page	= e1000_write_phy_reg_page_hv;
468 	phy->ops.power_up	= e1000_power_up_phy_copper;
469 	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;
470 	phy->autoneg_mask	= AUTONEG_ADVERTISE_SPEED_DEFAULT;
471 
472 	phy->id = e1000_phy_unknown;
473 
474 	ret_val = e1000_init_phy_workarounds_pchlan(hw);
475 	if (ret_val)
476 		return ret_val;
477 
478 	if (phy->id == e1000_phy_unknown)
479 		switch (hw->mac.type) {
480 		default:
481 			ret_val = e1000_get_phy_id(hw);
482 			if (ret_val)
483 				return ret_val;
484 			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
485 				break;
486 			/* fall-through */
487 		case e1000_pch2lan:
488 		case e1000_pch_lpt:
489 		case e1000_pch_spt:
490 			/* In case the PHY needs to be in mdio slow mode,
491 			 * set slow mode and try to get the PHY id again.
492 			 */
493 			ret_val = e1000_set_mdio_slow_mode_hv(hw);
494 			if (ret_val)
495 				return ret_val;
496 			ret_val = e1000_get_phy_id(hw);
497 			if (ret_val)
498 				return ret_val;
499 			break;
500 		}
501 	phy->type = e1000_get_phy_type_from_id(phy->id);
502 
503 	switch (phy->type) {
504 	case e1000_phy_82577:
505 	case e1000_phy_82579:
506 	case e1000_phy_i217:
507 		phy->ops.check_polarity = e1000_check_polarity_82577;
508 		phy->ops.force_speed_duplex =
509 			e1000_phy_force_speed_duplex_82577;
510 		phy->ops.get_cable_length = e1000_get_cable_length_82577;
511 		phy->ops.get_info = e1000_get_phy_info_82577;
512 		phy->ops.commit = e1000_phy_sw_reset_generic;
513 		break;
514 	case e1000_phy_82578:
515 		phy->ops.check_polarity = e1000_check_polarity_m88;
516 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
517 		phy->ops.get_cable_length = e1000_get_cable_length_m88;
518 		phy->ops.get_info = e1000_get_phy_info_m88;
519 		break;
520 	default:
521 		ret_val = -E1000_ERR_PHY;
522 		break;
523 	}
524 
525 	return ret_val;
526 }
527 
528 /**
529  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
530  *  @hw: pointer to the HW structure
531  *
532  *  Initialize family-specific PHY parameters and function pointers.
533  **/
534 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
535 {
536 	struct e1000_phy_info *phy = &hw->phy;
537 	s32 ret_val;
538 	u16 i = 0;
539 
540 	DEBUGFUNC("e1000_init_phy_params_ich8lan");
541 
542 	phy->addr		= 1;
543 	phy->reset_delay_us	= 100;
544 
545 	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
546 	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
547 	phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
548 	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
549 	phy->ops.read_reg	= e1000_read_phy_reg_igp;
550 	phy->ops.release	= e1000_release_swflag_ich8lan;
551 	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
552 	phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
553 	phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
554 	phy->ops.write_reg	= e1000_write_phy_reg_igp;
555 	phy->ops.power_up	= e1000_power_up_phy_copper;
556 	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;
557 
558 	/* We may need to do this twice - once for IGP and if that fails,
559 	 * we'll set BM func pointers and try again
560 	 */
561 	ret_val = e1000_determine_phy_address(hw);
562 	if (ret_val) {
563 		phy->ops.write_reg = e1000_write_phy_reg_bm;
564 		phy->ops.read_reg  = e1000_read_phy_reg_bm;
565 		ret_val = e1000_determine_phy_address(hw);
566 		if (ret_val) {
567 			DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
568 			return ret_val;
569 		}
570 	}
571 
572 	phy->id = 0;
573 	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
574 	       (i++ < 100)) {
575 		msec_delay(1);
576 		ret_val = e1000_get_phy_id(hw);
577 		if (ret_val)
578 			return ret_val;
579 	}
580 
581 	/* Verify phy id */
582 	switch (phy->id) {
583 	case IGP03E1000_E_PHY_ID:
584 		phy->type = e1000_phy_igp_3;
585 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
586 		phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
587 		phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
588 		phy->ops.get_info = e1000_get_phy_info_igp;
589 		phy->ops.check_polarity = e1000_check_polarity_igp;
590 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
591 		break;
592 	case IFE_E_PHY_ID:
593 	case IFE_PLUS_E_PHY_ID:
594 	case IFE_C_E_PHY_ID:
595 		phy->type = e1000_phy_ife;
596 		phy->autoneg_mask = E1000_ALL_NOT_GIG;
597 		phy->ops.get_info = e1000_get_phy_info_ife;
598 		phy->ops.check_polarity = e1000_check_polarity_ife;
599 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
600 		break;
601 	case BME1000_E_PHY_ID:
602 		phy->type = e1000_phy_bm;
603 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
604 		phy->ops.read_reg = e1000_read_phy_reg_bm;
605 		phy->ops.write_reg = e1000_write_phy_reg_bm;
606 		phy->ops.commit = e1000_phy_sw_reset_generic;
607 		phy->ops.get_info = e1000_get_phy_info_m88;
608 		phy->ops.check_polarity = e1000_check_polarity_m88;
609 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
610 		break;
611 	default:
612 		return -E1000_ERR_PHY;
614 	}
615 
616 	return E1000_SUCCESS;
617 }
618 
619 /**
620  *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
621  *  @hw: pointer to the HW structure
622  *
623  *  Initialize family-specific NVM parameters and function
624  *  pointers.
625  **/
626 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
627 {
628 	struct e1000_nvm_info *nvm = &hw->nvm;
629 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
630 	u32 gfpreg, sector_base_addr, sector_end_addr;
631 	u16 i;
632 	u32 nvm_size;
633 
634 	DEBUGFUNC("e1000_init_nvm_params_ich8lan");
635 
636 	/* Can't read flash registers if the register set isn't mapped. */
637 	nvm->type = e1000_nvm_flash_sw;
638 
639 	/* XXX turn flash_address into flash_reg_off or something more appropriate */
640 #define E1000_FLASH_BASE_ADDR	0xE000  /* offset of NVM access regs */
641 #define NVM_SIZE_MULTIPLIER	4096
642 
643 	if (hw->mac.type == e1000_pch_spt) {
644 		/*
645 		 * In SPT the flash is in the GbE flash region of the
646 		 * main hw map.  GFPREG does not exist.  Take NVM size from
647 		 * the STRAP register.
648 		 */
649 		nvm->flash_base_addr = 0;
650 		nvm_size = (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
651 			   * NVM_SIZE_MULTIPLIER;
652 		nvm->flash_bank_size = nvm_size / 2;
653 		/* Adjust to word count */
654 		nvm->flash_bank_size /= sizeof(u16);
655 		/* Set the base address for flash register access */
656 		hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
657 	} else {
658 		if (!hw->flash_address) {
659 			DEBUGOUT("ERROR: Flash registers not mapped\n");
660 			return -E1000_ERR_CONFIG;
661 		}
662 
663 		gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
664 
665 		/* sector_X_addr is a "sector"-aligned address (4096 bytes)
666 		 * Add 1 to sector_end_addr since this sector is included in
667 		 * the overall size.
668 		 */
669 		sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
670 		sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
671 
672 		/* flash_base_addr is byte-aligned */
673 		nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
674 
675 		/* find total size of the NVM, then cut in half since the total
676 		 * size represents two separate NVM banks.
677 		 */
678 		nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
679 					<< FLASH_SECTOR_ADDR_SHIFT);
680 		nvm->flash_bank_size /= 2;
681 		/* Adjust to word count */
682 		nvm->flash_bank_size /= sizeof(u16);
683 	}
684 
685 	nvm->word_size = E1000_SHADOW_RAM_WORDS;
686 
687 	/* Clear shadow ram */
688 	for (i = 0; i < nvm->word_size; i++) {
689 		dev_spec->shadow_ram[i].modified = FALSE;
690 		dev_spec->shadow_ram[i].value    = 0xFFFF;
691 	}
692 
693 	/* Function Pointers */
694 	nvm->ops.acquire	= e1000_acquire_nvm_ich8lan;
695 	nvm->ops.release	= e1000_release_nvm_ich8lan;
696 	if (hw->mac.type == e1000_pch_spt) {
697 		nvm->ops.read		= e1000_read_nvm_spt;
698 		nvm->ops.update		= e1000_update_nvm_checksum_spt;
699 	} else {
700 		nvm->ops.read		= e1000_read_nvm_ich8lan;
701 		nvm->ops.update		= e1000_update_nvm_checksum_ich8lan;
702 	}
703 	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
704 	nvm->ops.validate	= e1000_validate_nvm_checksum_ich8lan;
705 	nvm->ops.write		= e1000_write_nvm_ich8lan;
706 
707 	return E1000_SUCCESS;
708 }
709 
710 /**
711  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
712  *  @hw: pointer to the HW structure
713  *
714  *  Initialize family-specific MAC parameters and function
715  *  pointers.
716  **/
717 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
718 {
719 	struct e1000_mac_info *mac = &hw->mac;
720 	u16 pci_cfg;
721 
722 	DEBUGFUNC("e1000_init_mac_params_ich8lan");
723 
724 	/* Set media type function pointer */
725 	hw->phy.media_type = e1000_media_type_copper;
726 
727 	/* Set mta register count */
728 	mac->mta_reg_count = 32;
729 	/* Set rar entry count */
730 	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
731 	if (mac->type == e1000_ich8lan)
732 		mac->rar_entry_count--;
733 	/* Set if part includes ASF firmware */
734 	mac->asf_firmware_present = TRUE;
735 	/* FWSM register */
736 	mac->has_fwsm = TRUE;
737 	/* ARC subsystem not supported */
738 	mac->arc_subsystem_valid = FALSE;
739 	/* Adaptive IFS supported */
740 	mac->adaptive_ifs = TRUE;
741 
742 	/* Function pointers */
743 
744 	/* bus type/speed/width */
745 	mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
746 	/* function id */
747 	mac->ops.set_lan_id = e1000_set_lan_id_single_port;
748 	/* reset */
749 	mac->ops.reset_hw = e1000_reset_hw_ich8lan;
750 	/* hw initialization */
751 	mac->ops.init_hw = e1000_init_hw_ich8lan;
752 	/* link setup */
753 	mac->ops.setup_link = e1000_setup_link_ich8lan;
754 	/* physical interface setup */
755 	mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
756 	/* check for link */
757 	mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
758 	/* link info */
759 	mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
760 	/* multicast address update */
761 	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
762 	/* clear hardware counters */
763 	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
764 
765 	/* LED and other operations */
766 	switch (mac->type) {
767 	case e1000_ich8lan:
768 	case e1000_ich9lan:
769 	case e1000_ich10lan:
770 		/* check management mode */
771 		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
772 		/* ID LED init */
773 		mac->ops.id_led_init = e1000_id_led_init_generic;
774 		/* blink LED */
775 		mac->ops.blink_led = e1000_blink_led_generic;
776 		/* setup LED */
777 		mac->ops.setup_led = e1000_setup_led_generic;
778 		/* cleanup LED */
779 		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
780 		/* turn on/off LED */
781 		mac->ops.led_on = e1000_led_on_ich8lan;
782 		mac->ops.led_off = e1000_led_off_ich8lan;
783 		break;
784 	case e1000_pch2lan:
785 		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
786 		mac->ops.rar_set = e1000_rar_set_pch2lan;
787 		/* fall-through */
788 	case e1000_pch_lpt:
789 	case e1000_pch_spt:
790 		/* multicast address update for pch2 */
791 		mac->ops.update_mc_addr_list =
792 			e1000_update_mc_addr_list_pch2lan;
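		/* fall-through */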
793 	case e1000_pchlan:
794 		/* save PCH revision_id */
795 		e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
796 		hw->revision_id = (u8)(pci_cfg & 0x000F);
797 		/* check management mode */
798 		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
799 		/* ID LED init */
800 		mac->ops.id_led_init = e1000_id_led_init_pchlan;
801 		/* setup LED */
802 		mac->ops.setup_led = e1000_setup_led_pchlan;
803 		/* cleanup LED */
804 		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
805 		/* turn on/off LED */
806 		mac->ops.led_on = e1000_led_on_pchlan;
807 		mac->ops.led_off = e1000_led_off_pchlan;
808 		break;
809 	default:
810 		break;
811 	}
812 
813 	if (mac->type == e1000_pch_lpt ||
814 	    mac->type == e1000_pch_spt) {
815 		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
816 		mac->ops.rar_set = e1000_rar_set_pch_lpt;
817 		mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
818 		mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt;
819 	}
820 
821 	/* Enable PCS Lock-loss workaround for ICH8 */
822 	if (mac->type == e1000_ich8lan)
823 		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
824 
825 	return E1000_SUCCESS;
826 }
827 
828 /**
829  *  __e1000_access_emi_reg_locked - Read/write EMI register
830  *  @hw: pointer to the HW structure
831  *  @address: EMI address to program
832  *  @data: pointer to value to read/write from/to the EMI address
833  *  @read: boolean flag to indicate read or write
834  *
835  *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
836  **/
837 static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
838 					 u16 *data, bool read)
839 {
840 	s32 ret_val;
841 
842 	DEBUGFUNC("__e1000_access_emi_reg_locked");
843 
844 	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
845 	if (ret_val)
846 		return ret_val;
847 
848 	if (read)
849 		ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
850 						      data);
851 	else
852 		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
853 						       *data);
854 
855 	return ret_val;
856 }
857 
858 /**
859  *  e1000_read_emi_reg_locked - Read Extended Management Interface register
860  *  @hw: pointer to the HW structure
861  *  @addr: EMI address to program
862  *  @data: pointer to storage for the value read from the EMI address
863  *
864  *  Assumes the SW/FW/HW Semaphore is already acquired.
865  **/
866 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
867 {
868 	DEBUGFUNC("e1000_read_emi_reg_locked");
869 
870 	return __e1000_access_emi_reg_locked(hw, addr, data, TRUE);
871 }
872 
873 /**
874  *  e1000_write_emi_reg_locked - Write Extended Management Interface register
875  *  @hw: pointer to the HW structure
876  *  @addr: EMI address to program
877  *  @data: value to be written to the EMI address
878  *
879  *  Assumes the SW/FW/HW Semaphore is already acquired.
880  **/
881 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
882 {
883 	DEBUGFUNC("e1000_write_emi_reg_locked");
884 
885 	return __e1000_access_emi_reg_locked(hw, addr, &data, FALSE);
886 }
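
/* Typical calling pattern (sketch): the SW/FW/HW semaphore must be held
 * around any EMI access, exactly as e1000_set_eee_pchlan() below does:
 *
 *	ret_val = hw->phy.ops.acquire(hw);
 *	if (ret_val)
 *		return ret_val;
 *	ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, &data);
 *	hw->phy.ops.release(hw);
 */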
887 
888 /**
889  *  e1000_set_eee_pchlan - Enable/disable EEE support
890  *  @hw: pointer to the HW structure
891  *
892  *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
893  *  the link and the EEE capabilities of the link partner.  The LPI Control
894  *  register bits will remain set only if/when link is up.
895  *
896  *  EEE LPI must not be asserted earlier than one second after link is up.
897  *  On 82579, EEE LPI should not be enabled until such time otherwise there
898  *  can be link issues with some switches.  Other devices can have EEE LPI
899  *  enabled immediately upon link up since they have a timer in hardware which
900  *  prevents LPI from being asserted too early.
901  **/
902 s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
903 {
904 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
905 	s32 ret_val;
906 	u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
907 
908 	DEBUGFUNC("e1000_set_eee_pchlan");
909 
910 	switch (hw->phy.type) {
911 	case e1000_phy_82579:
912 		lpa = I82579_EEE_LP_ABILITY;
913 		pcs_status = I82579_EEE_PCS_STATUS;
914 		adv_addr = I82579_EEE_ADVERTISEMENT;
915 		break;
916 	case e1000_phy_i217:
917 		lpa = I217_EEE_LP_ABILITY;
918 		pcs_status = I217_EEE_PCS_STATUS;
919 		adv_addr = I217_EEE_ADVERTISEMENT;
920 		break;
921 	default:
922 		return E1000_SUCCESS;
923 	}
924 
925 	ret_val = hw->phy.ops.acquire(hw);
926 	if (ret_val)
927 		return ret_val;
928 
929 	ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
930 	if (ret_val)
931 		goto release;
932 
933 	/* Clear bits that enable EEE in various speeds */
934 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
935 
936 	/* Enable EEE if not disabled by user */
937 	if (!dev_spec->eee_disable) {
938 		/* Save off link partner's EEE ability */
939 		ret_val = e1000_read_emi_reg_locked(hw, lpa,
940 						    &dev_spec->eee_lp_ability);
941 		if (ret_val)
942 			goto release;
943 
944 		/* Read EEE advertisement */
945 		ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
946 		if (ret_val)
947 			goto release;
948 
949 		/* Enable EEE only for speeds in which the link partner is
950 		 * EEE capable and for which we advertise EEE.
951 		 */
952 		if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
953 			lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
954 
955 		if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
956 			hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
957 			if (data & NWAY_LPAR_100TX_FD_CAPS)
958 				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
959 			else
960 				/* EEE is not supported in 100Half, so ignore
961 				 * partner's EEE in 100 ability if full-duplex
962 				 * is not advertised.
963 				 */
964 				dev_spec->eee_lp_ability &=
965 				    ~I82579_EEE_100_SUPPORTED;
966 		}
967 	}
968 
969 	if (hw->phy.type == e1000_phy_82579) {
970 		ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
971 						    &data);
972 		if (ret_val)
973 			goto release;
974 
975 		data &= ~I82579_LPI_100_PLL_SHUT;
976 		ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
977 						     data);
978 	}
979 
980 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
981 	ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
982 	if (ret_val)
983 		goto release;
984 
985 	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
986 release:
987 	hw->phy.ops.release(hw);
988 
989 	return ret_val;
990 }
991 
992 /**
993  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
994  *  @hw:   pointer to the HW structure
995  *  @link: link up bool flag
996  *
997  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
998  *  preventing further DMA write requests.  Workaround the issue by disabling
999  *  the de-assertion of the clock request when in 1Gbps mode.
1000  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
1001  *  speeds in order to avoid Tx hangs.
1002  **/
1003 static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
1004 {
1005 	u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1006 	u32 status = E1000_READ_REG(hw, E1000_STATUS);
1007 	s32 ret_val = E1000_SUCCESS;
1008 	u16 reg;
1009 
1010 	if (link && (status & E1000_STATUS_SPEED_1000)) {
1011 		ret_val = hw->phy.ops.acquire(hw);
1012 		if (ret_val)
1013 			return ret_val;
1014 
1015 		ret_val =
1016 		    e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1017 					       &reg);
1018 		if (ret_val)
1019 			goto release;
1020 
1021 		ret_val =
1022 		    e1000_write_kmrn_reg_locked(hw,
1023 						E1000_KMRNCTRLSTA_K1_CONFIG,
1024 						reg &
1025 						~E1000_KMRNCTRLSTA_K1_ENABLE);
1026 		if (ret_val)
1027 			goto release;
1028 
1029 		usec_delay(10);
1030 
1031 		E1000_WRITE_REG(hw, E1000_FEXTNVM6,
1032 				fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
1033 
1034 		ret_val =
1035 		    e1000_write_kmrn_reg_locked(hw,
1036 						E1000_KMRNCTRLSTA_K1_CONFIG,
1037 						reg);
1038 release:
1039 		hw->phy.ops.release(hw);
1040 	} else {
1041 		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
1042 		fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
1043 
1044 		if (!link || ((status & E1000_STATUS_SPEED_100) &&
1045 			      (status & E1000_STATUS_FD)))
1046 			goto update_fextnvm6;
1047 
1048 		ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
1049 		if (ret_val)
1050 			return ret_val;
1051 
1052 		/* Clear link status transmit timeout */
1053 		reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
1054 
1055 		if (status & E1000_STATUS_SPEED_100) {
1056 			/* Set inband Tx timeout to 5x10us for 100Half */
1057 			reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1058 
1059 			/* Do not extend the K1 entry latency for 100Half */
1060 			fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1061 		} else {
1062 			/* Set inband Tx timeout to 50x10us for 10Full/Half */
1063 			reg |= 50 <<
1064 			       I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1065 
1066 			/* Extend the K1 entry latency for 10 Mbps */
1067 			fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1068 		}
1069 
1070 		ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
1071 		if (ret_val)
1072 			return ret_val;
1073 
1074 update_fextnvm6:
1075 		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1076 	}
1077 
1078 	return ret_val;
1079 }
1080 
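/**
 *  e1000_ltr2ns - convert an encoded LTR value to nanoseconds
 *  @ltr: Latency Tolerance Reporting value: a 10-bit latency value in the
 *	low bits plus a 3-bit scale, per the PCIe LTR encoding
 **/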
1081 static u64 e1000_ltr2ns(u16 ltr)
1082 {
1083 	u32 value, scale;
1084 
1085 	/* Determine the latency in nsec based on the LTR value & scale */
1086 	value = ltr & E1000_LTRV_VALUE_MASK;
1087 	scale = (ltr & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT;
1088 
1089 	return value * (1ULL << (scale * E1000_LTRV_SCALE_FACTOR));
1090 }
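
/* Worked example (illustrative): with scale = 2 and value = 100,
 * e1000_ltr2ns((2 << E1000_LTRV_SCALE_SHIFT) | 100) returns
 * 100 * 2^(2 * 5) = 102400 ns, i.e. ~102 usec.
 */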
1091 
1092 /**
1093  *  e1000_platform_pm_pch_lpt - Set platform power management values
1094  *  @hw: pointer to the HW structure
1095  *  @link: bool indicating link status
1096  *
1097  *  Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
1098  *  GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
1099  *  when link is up (which must not exceed the maximum latency supported
1100  *  by the platform), otherwise specify there is no LTR requirement.
1101  *  Unlike true-PCIe devices which set the LTR maximum snoop/no-snoop
1102  *  latencies in the LTR Extended Capability Structure in the PCIe Extended
1103  *  Capability register set, on this device LTR is set by writing the
1104  *  equivalent snoop/no-snoop latencies in the LTRV register in the MAC,
1105  *  then setting the SEND bit to send an Intel On-chip System Fabric
1106  *  sideband (IOSF-SB) message to the PMC.
1107  *
1108  *  Use the LTR value to calculate the Optimized Buffer Flush/Fill (OBFF)
1109  *  high-water mark.
1110  **/
1111 static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
1112 {
1113 	u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
1114 		  link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
1115 	u16 lat_enc = 0;	/* latency encoded */
1116 	s32 obff_hwm = 0;
1117 
1118 	DEBUGFUNC("e1000_platform_pm_pch_lpt");
1119 
1120 	if (link) {
1121 		u16 speed, duplex, scale = 0;
1122 		u16 max_snoop, max_nosnoop;
1123 		u16 max_ltr_enc;	/* max LTR latency encoded */
1124 		s64 lat_ns;		/* latency (ns) */
1125 		s64 value;
1126 		u32 rxa;
1127 
1128 		if (!hw->mac.max_frame_size) {
1129 			DEBUGOUT("max_frame_size not set.\n");
1130 			return -E1000_ERR_CONFIG;
1131 		}
1132 
1133 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
1134 		if (!speed) {
1135 			DEBUGOUT("Speed not set.\n");
1136 			return -E1000_ERR_CONFIG;
1137 		}
1138 
1139 		/* Rx Packet Buffer Allocation size (KB) */
1140 		rxa = E1000_READ_REG(hw, E1000_PBA) & E1000_PBA_RXA_MASK;
1141 
1142 		/* Determine the maximum latency tolerated by the device.
1143 		 *
1144 		 * Per the PCIe spec, the tolerated latencies are encoded as
1145 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
1146 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
1147 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
1148 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
1149 		 */
1150 		lat_ns = ((s64)rxa * 1024 -
1151 			  (2 * (s64)hw->mac.max_frame_size)) * 8 * 1000;
1152 		if (lat_ns < 0)
1153 			lat_ns = 0;
1154 		else
1155 			lat_ns /= speed;
1156 
1157 		value = lat_ns;
1158 		while (value > E1000_LTRV_VALUE_MASK) {
1159 			scale++;
1160 			value = E1000_DIVIDE_ROUND_UP(value, (1 << 5));
1161 		}
1162 		if (scale > E1000_LTRV_SCALE_MAX) {
1163 			DEBUGOUT1("Invalid LTR latency scale %d\n", scale);
1164 			return -E1000_ERR_CONFIG;
1165 		}
1166 		lat_enc = (u16)((scale << E1000_LTRV_SCALE_SHIFT) | value);
1167 
1168 		/* Determine the maximum latency tolerated by the platform */
1169 		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT, &max_snoop);
1170 		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
1171 		max_ltr_enc = E1000_MAX(max_snoop, max_nosnoop);
1172 
1173 		if (lat_enc > max_ltr_enc) {
1174 			lat_enc = max_ltr_enc;
1175 			lat_ns = e1000_ltr2ns(max_ltr_enc);
1176 		}
1177 
1178 		if (lat_ns) {
1179 			lat_ns *= speed * 1000;
1180 			lat_ns /= 8;
1181 			lat_ns /= 1000000000;
1182 			obff_hwm = (s32)(rxa - lat_ns);
1183 		}
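
		/* Worked example (illustrative): at 1000 Mb/s, a tolerated
		 * latency of lat_ns = 24000 converts to
		 * 24000 * 1000000 / 8 / 10^9 = 3 KB of buffer, so with
		 * rxa = 24 KB the high-water mark becomes 21.
		 */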
1184 		if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) {
1185 			DEBUGOUT1("Invalid high water mark %d\n", obff_hwm);
1186 			return -E1000_ERR_CONFIG;
1187 		}
1188 	}
1189 
1190 	/* Set Snoop and No-Snoop latencies the same */
1191 	reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
1192 	E1000_WRITE_REG(hw, E1000_LTRV, reg);
1193 
1194 	/* Set OBFF high water mark */
1195 	reg = E1000_READ_REG(hw, E1000_SVT) & ~E1000_SVT_OFF_HWM_MASK;
1196 	reg |= obff_hwm;
1197 	E1000_WRITE_REG(hw, E1000_SVT, reg);
1198 
1199 	/* Enable OBFF */
1200 	reg = E1000_READ_REG(hw, E1000_SVCR);
1201 	reg |= E1000_SVCR_OFF_EN;
1202 	/* Always unblock interrupts to the CPU even when the system is
1203 	 * in OBFF mode. This ensures that small round-robin traffic
1204 	 * (like ping) does not get dropped or experience long latency.
1205 	 */
1206 	reg |= E1000_SVCR_OFF_MASKINT;
1207 	E1000_WRITE_REG(hw, E1000_SVCR, reg);
1208 
1209 	return E1000_SUCCESS;
1210 }
1211 
1212 /**
1213  *  e1000_set_obff_timer_pch_lpt - Update Optimized Buffer Flush/Fill timer
1214  *  @hw: pointer to the HW structure
1215  *  @itr: interrupt throttling rate
1216  *
1217  *  Configure OBFF with the updated interrupt rate.
1218  **/
1219 static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr)
1220 {
1221 	u32 svcr;
1222 	s32 timer;
1223 
1224 	DEBUGFUNC("e1000_set_obff_timer_pch_lpt");
1225 
1226 	/* Convert ITR value into microseconds for OBFF timer */
1227 	timer = itr & E1000_ITR_MASK;
1228 	timer = (timer * E1000_ITR_MULT) / 1000;
1229 
1230 	if ((timer < 0) || (timer > E1000_ITR_MASK)) {
1231 		DEBUGOUT1("Invalid OBFF timer %d\n", timer);
1232 		return -E1000_ERR_CONFIG;
1233 	}
1234 
1235 	svcr = E1000_READ_REG(hw, E1000_SVCR);
1236 	svcr &= ~E1000_SVCR_OFF_TIMER_MASK;
1237 	svcr |= timer << E1000_SVCR_OFF_TIMER_SHIFT;
1238 	E1000_WRITE_REG(hw, E1000_SVCR, svcr);
1239 
1240 	return E1000_SUCCESS;
1241 }
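
/* Worked example (a sketch; assumes E1000_ITR_MULT is the ITR granularity
 * in nanoseconds, 256 on this family): an itr value of 4000 yields
 * (4000 * 256) / 1000 = 1024 usec for the OBFF timer field.
 */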
1242 
1243 /**
1244  *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1245  *  @hw: pointer to the HW structure
1246  *  @to_sx: boolean indicating a system power state transition to Sx
1247  *
1248  *  When link is down, configure ULP mode to significantly reduce the power
1249  *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
1250  *  ME firmware to start the ULP configuration.  If not on an ME enabled
1251  *  system, configure the ULP mode by software.
1252  **/
1253 s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1254 {
1255 	u32 mac_reg;
1256 	s32 ret_val = E1000_SUCCESS;
1257 	u16 phy_reg;
1258 
1259 	if ((hw->mac.type < e1000_pch_lpt) ||
1260 	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1261 	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1262 	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1263 	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1264 	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1265 		return 0;
1266 
1267 	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1268 		/* Request ME configure ULP mode in the PHY */
1269 		mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1270 		mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1271 		E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1272 
1273 		goto out;
1274 	}
1275 
1276 	if (!to_sx) {
1277 		int i = 0;
1278 
1279 		/* Poll up to 5 seconds for Cable Disconnected indication */
1280 		while (!(E1000_READ_REG(hw, E1000_FEXT) &
1281 			 E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1282 			/* Bail if link is re-acquired */
1283 			if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
1284 				return -E1000_ERR_PHY;
1285 
1286 			if (i++ == 100)
1287 				break;
1288 
1289 			msec_delay(50);
1290 		}
1291 		DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
1292 			 (E1000_READ_REG(hw, E1000_FEXT) &
1293 			  E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
1294 			 i * 50);
1295 	}
1296 
1297 	ret_val = hw->phy.ops.acquire(hw);
1298 	if (ret_val)
1299 		goto out;
1300 
1301 	/* Force SMBus mode in PHY */
1302 	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1303 	if (ret_val)
1304 		goto release;
1305 	phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1306 	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1307 
1308 	/* Force SMBus mode in MAC */
1309 	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1310 	mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1311 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1312 
1313 	/* Set Inband ULP Exit, Reset to SMBus mode and
1314 	 * Disable SMBus Release on PERST# in PHY
1315 	 */
1316 	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1317 	if (ret_val)
1318 		goto release;
1319 	phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1320 		    I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1321 	if (to_sx) {
1322 		if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
1323 			phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1324 
1325 		phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1326 	} else {
1327 		phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1328 	}
1329 	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1330 
1331 	/* Set Disable SMBus Release on PERST# in MAC */
1332 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1333 	mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1334 	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1335 
1336 	/* Commit ULP changes in PHY by starting auto ULP configuration */
1337 	phy_reg |= I218_ULP_CONFIG1_START;
1338 	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1339 release:
1340 	hw->phy.ops.release(hw);
1341 out:
1342 	if (ret_val)
1343 		DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
1344 	else
1345 		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1346 
1347 	return ret_val;
1348 }
1349 
1350 /**
1351  *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1352  *  @hw: pointer to the HW structure
1353  *  @force: boolean indicating whether or not to force disabling ULP
1354  *
1355  *  Un-configure ULP mode when link is up, the system is transitioned from
1356  *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1357  *  system, poll for an indication from ME that ULP has been un-configured.
1358  *  If not on an ME enabled system, un-configure the ULP mode by software.
1359  *
1360  *  During nominal operation, this function is called when link is acquired
1361  *  to disable ULP mode (force=FALSE); otherwise, for example when unloading
1362  *  the driver or during Sx->S0 transitions, this is called with force=TRUE
1363  *  to forcibly disable ULP.
1364  **/
1365 s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1366 {
1367 	s32 ret_val = E1000_SUCCESS;
1368 	u32 mac_reg;
1369 	u16 phy_reg;
1370 	int i = 0;
1371 
1372 	if ((hw->mac.type < e1000_pch_lpt) ||
1373 	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1374 	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1375 	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1376 	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1377 	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1378 		return 0;
1379 
1380 	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1381 		if (force) {
1382 			/* Request ME un-configure ULP mode in the PHY */
1383 			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1384 			mac_reg &= ~E1000_H2ME_ULP;
1385 			mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1386 			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1387 		}
1388 
1389 		/* Poll up to 100msec for ME to clear ULP_CFG_DONE */
1390 		while (E1000_READ_REG(hw, E1000_FWSM) &
1391 		       E1000_FWSM_ULP_CFG_DONE) {
1392 			if (i++ == 10) {
1393 				ret_val = -E1000_ERR_PHY;
1394 				goto out;
1395 			}
1396 
1397 			msec_delay(10);
1398 		}
1399 		DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1400 
1401 		if (force) {
1402 			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1403 			mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1404 			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1405 		} else {
1406 			/* Clear H2ME.ULP after ME ULP configuration */
1407 			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1408 			mac_reg &= ~E1000_H2ME_ULP;
1409 			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1410 		}
1411 
1412 		goto out;
1413 	}
1414 
1415 	ret_val = hw->phy.ops.acquire(hw);
1416 	if (ret_val)
1417 		goto out;
1418 
1419 	if (force)
1420 		/* Toggle LANPHYPC Value bit */
1421 		e1000_toggle_lanphypc_pch_lpt(hw);
1422 
1423 	/* Unforce SMBus mode in PHY */
1424 	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1425 	if (ret_val) {
1426 		/* The MAC might be in PCIe mode, so temporarily force to
1427 		 * SMBus mode in order to access the PHY.
1428 		 */
1429 		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1430 		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1431 		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1432 
1433 		msec_delay(50);
1434 
1435 		ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1436 						       &phy_reg);
1437 		if (ret_val)
1438 			goto release;
1439 	}
1440 	phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1441 	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1442 
1443 	/* Unforce SMBus mode in MAC */
1444 	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1445 	mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1446 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1447 
1448 	/* When ULP mode was previously entered, K1 was disabled by the
1449 	 * hardware.  Re-Enable K1 in the PHY when exiting ULP.
1450 	 */
1451 	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1452 	if (ret_val)
1453 		goto release;
1454 	phy_reg |= HV_PM_CTRL_K1_ENABLE;
1455 	e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1456 
1457 	/* Clear ULP enabled configuration */
1458 	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1459 	if (ret_val)
1460 		goto release;
1461 	phy_reg &= ~(I218_ULP_CONFIG1_IND |
1462 		     I218_ULP_CONFIG1_STICKY_ULP |
1463 		     I218_ULP_CONFIG1_RESET_TO_SMBUS |
1464 		     I218_ULP_CONFIG1_WOL_HOST |
1465 		     I218_ULP_CONFIG1_INBAND_EXIT |
1466 		     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1467 	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1468 
1469 	/* Commit ULP changes by starting auto ULP configuration */
1470 	phy_reg |= I218_ULP_CONFIG1_START;
1471 	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1472 
1473 	/* Clear Disable SMBus Release on PERST# in MAC */
1474 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1475 	mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1476 	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1477 
1478 release:
1479 	hw->phy.ops.release(hw);
1480 	if (force) {
1481 		hw->phy.ops.reset(hw);
1482 		msec_delay(50);
1483 	}
1484 out:
1485 	if (ret_val)
1486 		DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
1487 	else
1488 		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1489 
1490 	return ret_val;
1491 }
1492 
1493 /**
1494  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1495  *  @hw: pointer to the HW structure
1496  *
1497  *  Checks to see if the link status of the hardware has changed.  If a
1498  *  change in link status has been detected, then we read the PHY registers
1499  *  to get the current speed/duplex if link exists.
1500  **/
1501 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1502 {
1503 	struct e1000_mac_info *mac = &hw->mac;
1504 	s32 ret_val;
1505 	bool link;
1506 	u16 phy_reg;
1507 
1508 	DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1509 
1510 	/* We only want to go out to the PHY registers to see if Auto-Neg
1511 	 * has completed and/or if our link status has changed.  The
1512 	 * get_link_status flag is set upon receiving a Link Status
1513 	 * Change or Rx Sequence Error interrupt.
1514 	 */
1515 	if (!mac->get_link_status)
1516 		return E1000_SUCCESS;
1517 
1518 	/* First we want to see if the MII Status Register reports
1519 	 * link.  If so, then we want to get the current speed/duplex
1520 	 * of the PHY.
1521 	 */
1522 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1523 	if (ret_val)
1524 		return ret_val;
1525 
1526 	if (hw->mac.type == e1000_pchlan) {
1527 		ret_val = e1000_k1_gig_workaround_hv(hw, link);
1528 		if (ret_val)
1529 			return ret_val;
1530 	}
1531 
1532 	/* When connected at 10Mbps half-duplex, some parts are excessively
1533 	 * aggressive resulting in many collisions. To avoid this, increase
1534 	 * the IPG and reduce Rx latency in the PHY.
1535 	 */
1536 	if (((hw->mac.type == e1000_pch2lan) ||
1537 	     (hw->mac.type == e1000_pch_lpt) ||
1538 	     (hw->mac.type == e1000_pch_spt)) && link) {
1539 		u32 reg;
1540 		reg = E1000_READ_REG(hw, E1000_STATUS);
1541 		if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
1542 			u16 emi_addr;
1543 
1544 			reg = E1000_READ_REG(hw, E1000_TIPG);
1545 			reg &= ~E1000_TIPG_IPGT_MASK;
1546 			reg |= 0xFF;
1547 			E1000_WRITE_REG(hw, E1000_TIPG, reg);
1548 
1549 			/* Reduce Rx latency in analog PHY */
1550 			ret_val = hw->phy.ops.acquire(hw);
1551 			if (ret_val)
1552 				return ret_val;
1553 
1554 			if (hw->mac.type == e1000_pch2lan)
1555 				emi_addr = I82579_RX_CONFIG;
1556 			else
1557 				emi_addr = I217_RX_CONFIG;
1558 			ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0);
1559 
1560 			hw->phy.ops.release(hw);
1561 
1562 			if (ret_val)
1563 				return ret_val;
1564 		} else if (hw->mac.type == e1000_pch_spt &&
1565 			   (reg & E1000_STATUS_FD) &&
1566 			   (reg & E1000_STATUS_SPEED_MASK) == E1000_STATUS_SPEED_1000) {
1567 			reg = E1000_READ_REG(hw, E1000_TIPG) & ~E1000_TIPG_IPGT_MASK;
1568 			reg |= 0x0C;
1569 			E1000_WRITE_REG(hw, E1000_TIPG, reg);
1570 
1571 			ret_val = hw->phy.ops.acquire(hw);
1572 			if (ret_val)
1573 				return ret_val;
1574 
1575 			ret_val = e1000_write_emi_reg_locked(hw, I217_RX_CONFIG, 1);
1576 
1577 			hw->phy.ops.release(hw);
1578 
1579 			if (ret_val)
1580 				return ret_val;
1581 		}
1582 
1583 		/* At 1Gbps full-duplex on pch_spt parts, ensure the PHY
1584 		 * pointer gap in PHY_REG(776, 20) (bits 11:2) is >= 0x18.
1585 		 */
1586 		reg = E1000_READ_REG(hw, E1000_STATUS);
1587 		if (hw->mac.type == e1000_pch_spt &&
1588 		   (reg & E1000_STATUS_FD) &&
1589 		   (reg & E1000_STATUS_SPEED_MASK) == E1000_STATUS_SPEED_1000) {
1590 			u16 data;
1591 			u16 ptr_gap;
1592 
1593 			ret_val = hw->phy.ops.acquire(hw);
1594 			if (ret_val)
1595 				return ret_val;
1596 			hw->phy.ops.read_reg_locked(hw, PHY_REG(776, 20), &data);
1597 			ptr_gap = (data & (0x3FF << 2)) >> 2;
1598 			if (ptr_gap < 0x18) {
1599 				data &= ~(0x3FF << 2);
1600 				data |= (0x18 << 2);
1601 				hw->phy.ops.write_reg_locked(hw,
1602 						     PHY_REG(776, 20),
1603 						     data);
1604 			}
1605 			hw->phy.ops.release(hw);
1606 
1607 			if (ret_val)
1608 				return ret_val;
1609 		}
1610 	}
1611 
1612 	/* I217 Packet Loss issue:
1613 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
1614 	 * on power up.
1615 	 * Set the Beacon Duration for I217 to 8 usec
1616 	 */
1617 	if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) {
1618 		u32 mac_reg;
1619 
1620 		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1621 		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1622 		mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1623 		E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1624 	}
1625 
1626 	/* Work-around I218 hang issue */
1627 	if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1628 	    (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1629 	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1630 	    (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1631 		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1632 		if (ret_val)
1633 			return ret_val;
1634 	}
1635 
1636 	if (hw->mac.type == e1000_pch_lpt ||
1637 	    hw->mac.type == e1000_pch_spt) {
1638 		/* Set platform power management values for
1639 		 * Latency Tolerance Reporting (LTR)
1640 		 * Optimized Buffer Flush/Fill (OBFF)
1641 		 */
1642 		ret_val = e1000_platform_pm_pch_lpt(hw, link);
1643 		if (ret_val)
1644 			return ret_val;
1645 	}
1646 
1647 	/* Clear link partner's EEE ability */
1648 	hw->dev_spec.ich8lan.eee_lp_ability = 0;
1649 
1650 	/* FEXTNVM6 K1-off workaround */
1651 	if (hw->mac.type == e1000_pch_spt) {
1652 		u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);
1653 		u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1654 
1655 		if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
1656 			fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
1657 		else
1658 			fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1659 		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1660 	}
1661 
1662 	if (!link)
1663 		return E1000_SUCCESS; /* No link detected */
1664 
1665 	mac->get_link_status = FALSE;
1666 
1667 	switch (hw->mac.type) {
1668 	case e1000_pch2lan:
1669 		ret_val = e1000_k1_workaround_lv(hw);
1670 		if (ret_val)
1671 			return ret_val;
1672 		/* fall-thru */
1673 	case e1000_pchlan:
1674 		if (hw->phy.type == e1000_phy_82578) {
1675 			ret_val = e1000_link_stall_workaround_hv(hw);
1676 			if (ret_val)
1677 				return ret_val;
1678 		}
1679 
1680 		/* Workaround for PCHx parts in half-duplex:
1681 		 * Set the number of preambles removed from the packet
1682 		 * when it is passed from the PHY to the MAC to prevent
1683 		 * the MAC from misinterpreting the packet type.
1684 		 */
1685 		hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1686 		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1687 
1688 		if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1689 		    E1000_STATUS_FD)
1690 			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1691 
1692 		hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1693 		break;
1694 	default:
1695 		break;
1696 	}
1697 
1698 	/* Check if there was a DownShift; this must be checked
1699 	 * immediately after link-up.
1700 	 */
1701 	e1000_check_downshift_generic(hw);
1702 
1703 	/* Enable/Disable EEE after link up */
1704 	if (hw->phy.type > e1000_phy_82579) {
1705 		ret_val = e1000_set_eee_pchlan(hw);
1706 		if (ret_val)
1707 			return ret_val;
1708 	}
1709 
1710 	/* If we are forcing speed/duplex, then we simply return since
1711 	 * we have already determined whether we have link or not.
1712 	 */
1713 	if (!mac->autoneg)
1714 		return -E1000_ERR_CONFIG;
1715 
1716 	/* Auto-Neg is enabled.  Auto Speed Detection takes care
1717 	 * of MAC speed/duplex configuration.  So we only need to
1718 	 * configure Collision Distance in the MAC.
1719 	 */
1720 	mac->ops.config_collision_dist(hw);
1721 
1722 	/* Configure Flow Control now that Auto-Neg has completed.
1723 	 * First, we need to restore the desired flow control
1724 	 * settings because we may have had to re-autoneg with a
1725 	 * different link partner.
1726 	 */
1727 	ret_val = e1000_config_fc_after_link_up_generic(hw);
1728 	if (ret_val)
1729 		DEBUGOUT("Error configuring flow control\n");
1730 
1731 	return ret_val;
1732 }
1733 
1734 /**
1735  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1736  *  @hw: pointer to the HW structure
1737  *
1738  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1739  **/
1740 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1741 {
1742 	DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1743 
1744 	hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1745 	hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1746 	switch (hw->mac.type) {
1747 	case e1000_ich8lan:
1748 	case e1000_ich9lan:
1749 	case e1000_ich10lan:
1750 		hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1751 		break;
1752 	case e1000_pchlan:
1753 	case e1000_pch2lan:
1754 	case e1000_pch_lpt:
1755 	case e1000_pch_spt:
1756 		hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1757 		break;
1758 	default:
1759 		break;
1760 	}
1761 }
1762 
1763 /**
1764  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1765  *  @hw: pointer to the HW structure
1766  *
1767  *  Acquires the mutex for performing NVM operations.
1768  **/
1769 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1770 {
1771 	DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1772 	return E1000_SUCCESS;
1773 }
1774 
1775 /**
1776  *  e1000_release_nvm_ich8lan - Release NVM mutex
1777  *  @hw: pointer to the HW structure
1778  *
1779  *  Releases the mutex used while performing NVM operations.
1780  **/
1781 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1782 {
1783 	DEBUGFUNC("e1000_release_nvm_ich8lan");
1784 	return;
1785 }
1786 
1787 /**
1788  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1789  *  @hw: pointer to the HW structure
1790  *
1791  *  Acquires the software control flag for performing PHY and select
1792  *  MAC CSR accesses.
1793  **/
1794 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1795 {
1796 	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1797 	s32 ret_val = E1000_SUCCESS;
1798 
1799 	DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1800 
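	/* Acquisition is two-phased: first wait for any current owner to
	 * release the SW flag, then set it and poll until the hardware
	 * reflects the set, confirming ownership was actually granted.
	 */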
1801 	while (timeout) {
1802 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1803 		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1804 			break;
1805 
1806 		msec_delay_irq(1);
1807 		timeout--;
1808 	}
1809 
1810 	if (!timeout) {
1811 		DEBUGOUT("SW has already locked the resource.\n");
1812 		ret_val = -E1000_ERR_CONFIG;
1813 		goto out;
1814 	}
1815 
1816 	timeout = SW_FLAG_TIMEOUT;
1817 
1818 	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1819 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1820 
1821 	while (timeout) {
1822 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1823 		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1824 			break;
1825 
1826 		msec_delay_irq(1);
1827 		timeout--;
1828 	}
1829 
1830 	if (!timeout) {
1831 		DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x\n",
1832 			  E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1833 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1834 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1835 		ret_val = -E1000_ERR_CONFIG;
1836 		goto out;
1837 	}
1838 
1839 out:
1840 	return ret_val;
1841 }
1842 
1843 /**
1844  *  e1000_release_swflag_ich8lan - Release software control flag
1845  *  @hw: pointer to the HW structure
1846  *
1847  *  Releases the software control flag for performing PHY and select
1848  *  MAC CSR accesses.
1849  **/
1850 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1851 {
1852 	u32 extcnf_ctrl;
1853 
1854 	DEBUGFUNC("e1000_release_swflag_ich8lan");
1855 
1856 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1857 
1858 	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1859 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1860 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1861 	} else {
1862 		DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1863 	}
1864 	return;
1865 }
1866 
1867 /**
1868  *  e1000_check_mng_mode_ich8lan - Checks management mode
1869  *  @hw: pointer to the HW structure
1870  *
1871  *  This checks if the adapter has any manageability enabled.
1872  *  This is a function pointer entry point only called by read/write
1873  *  routines for the PHY and NVM parts.
1874  **/
1875 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1876 {
1877 	u32 fwsm;
1878 
1879 	DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1880 
1881 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
1882 
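	/* Manageability is reported only when the firmware image is valid
	 * and the FWSM mode field selects iAMT mode.
	 */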
1883 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1884 	       ((fwsm & E1000_FWSM_MODE_MASK) ==
1885 		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1886 }
1887 
1888 /**
1889  *  e1000_check_mng_mode_pchlan - Checks management mode
1890  *  @hw: pointer to the HW structure
1891  *
1892  *  This checks if the adapter has iAMT enabled.
1893  *  This is a function pointer entry point only called by read/write
1894  *  routines for the PHY and NVM parts.
1895  **/
1896 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1897 {
1898 	u32 fwsm;
1899 
1900 	DEBUGFUNC("e1000_check_mng_mode_pchlan");
1901 
1902 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
1903 
1904 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1905 	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1906 }
1907 
1908 /**
1909  *  e1000_rar_set_pch2lan - Set receive address register
1910  *  @hw: pointer to the HW structure
1911  *  @addr: pointer to the receive address
1912  *  @index: receive address array register
1913  *
1914  *  Sets the receive address array register at index to the address passed
1915  *  in by addr.  For 82579, RAR[0] is the base address register that is to
1916  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1917  *  Use SHRA[0-3] in place of those reserved for ME.
1918  **/
1919 static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1920 {
1921 	u32 rar_low, rar_high;
1922 
1923 	DEBUGFUNC("e1000_rar_set_pch2lan");
1924 
1925 	/* HW expects these in little endian so we reverse the byte order
1926 	 * from network order (big endian) to little endian
1927 	 */
1928 	rar_low = ((u32) addr[0] |
1929 		   ((u32) addr[1] << 8) |
1930 		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1931 
1932 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1933 
1934 	/* If MAC address zero, no need to set the AV bit */
1935 	if (rar_low || rar_high)
1936 		rar_high |= E1000_RAH_AV;
1937 
1938 	if (index == 0) {
1939 		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1940 		E1000_WRITE_FLUSH(hw);
1941 		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1942 		E1000_WRITE_FLUSH(hw);
1943 		return E1000_SUCCESS;
1944 	}
1945 
1946 	/* RAR[1-6] are owned by manageability.  Skip those and program the
1947 	 * next address into the SHRA register array.
1948 	 */
1949 	if (index < (u32) (hw->mac.rar_entry_count)) {
1950 		s32 ret_val;
1951 
1952 		ret_val = e1000_acquire_swflag_ich8lan(hw);
1953 		if (ret_val)
1954 			goto out;
1955 
1956 		E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
1957 		E1000_WRITE_FLUSH(hw);
1958 		E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
1959 		E1000_WRITE_FLUSH(hw);
1960 
1961 		e1000_release_swflag_ich8lan(hw);
1962 
1963 		/* verify the register updates */
1964 		if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
1965 		    (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
1966 			return E1000_SUCCESS;
1967 
1968 		DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1969 			 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
1970 	}
1971 
1972 out:
1973 	DEBUGOUT1("Failed to write receive address at index %d\n", index);
1974 	return -E1000_ERR_CONFIG;
1975 }
1976 
1977 /**
1978  *  e1000_rar_set_pch_lpt - Set receive address registers
1979  *  @hw: pointer to the HW structure
1980  *  @addr: pointer to the receive address
1981  *  @index: receive address array register
1982  *
1983  *  Sets the receive address register array at index to the address passed
1984  *  in by addr. For LPT, RAR[0] is the base address register that is to
1985  *  contain the MAC address. SHRA[0-10] are the shared receive address
1986  *  registers that are shared between the Host and manageability engine (ME).
1987  **/
1988 static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1989 {
1990 	u32 rar_low, rar_high;
1991 	u32 wlock_mac;
1992 
1993 	DEBUGFUNC("e1000_rar_set_pch_lpt");
1994 
1995 	/* HW expects these in little endian so we reverse the byte order
1996 	 * from network order (big endian) to little endian
1997 	 */
1998 	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
1999 		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2000 
2001 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2002 
2003 	/* If MAC address zero, no need to set the AV bit */
2004 	if (rar_low || rar_high)
2005 		rar_high |= E1000_RAH_AV;
2006 
2007 	if (index == 0) {
2008 		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2009 		E1000_WRITE_FLUSH(hw);
2010 		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2011 		E1000_WRITE_FLUSH(hw);
2012 		return E1000_SUCCESS;
2013 	}
2014 
2015 	/* The manageability engine (ME) can lock certain SHRAR registers that
2016 	 * it is using - those registers are unavailable for use.
2017 	 */
2018 	if (index < hw->mac.rar_entry_count) {
2019 		wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
2020 			    E1000_FWSM_WLOCK_MAC_MASK;
2021 		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
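		/* FWSM.WLOCK_MAC encodes which SHRA registers the host may
		 * write: 0 leaves them all available, 1 locks them all, and
		 * any other value n permits only SHRA[0]..SHRA[n-1]
		 * (RAR index <= n).
		 */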
2022 
2023 		/* Check if all SHRAR registers are locked */
2024 		if (wlock_mac == 1)
2025 			goto out;
2026 
2027 		if ((wlock_mac == 0) || (index <= wlock_mac)) {
2028 			s32 ret_val;
2029 
2030 			ret_val = e1000_acquire_swflag_ich8lan(hw);
2031 
2032 			if (ret_val)
2033 				goto out;
2034 
2035 			E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
2036 					rar_low);
2037 			E1000_WRITE_FLUSH(hw);
2038 			E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
2039 					rar_high);
2040 			E1000_WRITE_FLUSH(hw);
2041 
2042 			e1000_release_swflag_ich8lan(hw);
2043 
2044 			/* verify the register updates */
2045 			if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
2046 			    (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
2047 				return E1000_SUCCESS;
2048 		}
2049 	}
2050 
2051 out:
2052 	DEBUGOUT1("Failed to write receive address at index %d\n", index);
2053 	return -E1000_ERR_CONFIG;
2054 }
2055 
2056 /**
2057  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
2058  *  @hw: pointer to the HW structure
2059  *  @mc_addr_list: array of multicast addresses to program
2060  *  @mc_addr_count: number of multicast addresses to program
2061  *
2062  *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
2063  *  The caller must have a packed mc_addr_list of multicast addresses.
2064  **/
2065 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
2066 					      u8 *mc_addr_list,
2067 					      u32 mc_addr_count)
2068 {
2069 	u16 phy_reg = 0;
2070 	int i;
2071 	s32 ret_val;
2072 
2073 	DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
2074 
2075 	e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
2076 
2077 	ret_val = hw->phy.ops.acquire(hw);
2078 	if (ret_val)
2079 		return;
2080 
2081 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2082 	if (ret_val)
2083 		goto release;
2084 
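	/* Mirror each 32-bit MTA entry into the PHY's wakeup register space
	 * as two 16-bit writes, low word first.
	 */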
2085 	for (i = 0; i < hw->mac.mta_reg_count; i++) {
2086 		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
2087 					   (u16)(hw->mac.mta_shadow[i] &
2088 						 0xFFFF));
2089 		hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
2090 					   (u16)((hw->mac.mta_shadow[i] >> 16) &
2091 						 0xFFFF));
2092 	}
2093 
2094 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2095 
2096 release:
2097 	hw->phy.ops.release(hw);
2098 }
2099 
2100 /**
2101  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
2102  *  @hw: pointer to the HW structure
2103  *
2104  *  Checks if firmware is blocking the reset of the PHY.
2105  *  This is a function pointer entry point only called by
2106  *  reset routines.
2107  **/
2108 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2109 {
2110 	u32 fwsm;
2111 	bool blocked = FALSE;
2112 	int i = 0;
2113 
2114 	DEBUGFUNC("e1000_check_reset_block_ich8lan");
2115 
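	/* Poll FWSM up to 10 times (~100 ms total) in case firmware is only
	 * transiently blocking PHY resets.
	 */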
2116 	do {
2117 		fwsm = E1000_READ_REG(hw, E1000_FWSM);
2118 		if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
2119 			blocked = TRUE;
2120 			msec_delay(10);
2121 			continue;
2122 		}
2123 		blocked = FALSE;
2124 	} while (blocked && (i++ < 10));
2125 	return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
2126 }
2127 
2128 /**
2129  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2130  *  @hw: pointer to the HW structure
2131  *
2132  *  Assumes semaphore already acquired.
2133  *
2134  **/
2135 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2136 {
2137 	u16 phy_data;
2138 	u32 strap = E1000_READ_REG(hw, E1000_STRAP);
2139 	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2140 		E1000_STRAP_SMT_FREQ_SHIFT;
2141 	s32 ret_val;
2142 
2143 	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2144 
2145 	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2146 	if (ret_val)
2147 		return ret_val;
2148 
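	/* Copy the SMBus address strapped into the MAC's STRAP register
	 * into the PHY's HV_SMB_ADDR register, marking the address valid
	 * with packet error checking (PEC) enabled.
	 */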
2149 	phy_data &= ~HV_SMB_ADDR_MASK;
2150 	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2151 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2152 
2153 	if (hw->phy.type == e1000_phy_i217) {
2154 		/* Restore SMBus frequency */
2155 		if (freq--) {
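			/* A strap value of 0 means the frequency is not
			 * supported.  Otherwise freq-- yields (value - 1),
			 * whose bits 0 and 1 select the PHY's low and high
			 * frequency bits.
			 */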
2156 			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2157 			phy_data |= (freq & (1 << 0)) <<
2158 				HV_SMB_ADDR_FREQ_LOW_SHIFT;
2159 			phy_data |= (freq & (1 << 1)) <<
2160 				(HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2161 		} else {
2162 			DEBUGOUT("Unsupported SMB frequency in PHY\n");
2163 		}
2164 	}
2165 
2166 	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2167 }
2168 
2169 /**
2170  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2171  *  @hw:   pointer to the HW structure
2172  *
2173  *  SW should configure the LCD from the NVM extended configuration region
2174  *  as a workaround for certain parts.
2175  **/
2176 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2177 {
2178 	struct e1000_phy_info *phy = &hw->phy;
2179 	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2180 	s32 ret_val = E1000_SUCCESS;
2181 	u16 word_addr, reg_data, reg_addr, phy_page = 0;
2182 
2183 	DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2184 
2185 	/* Initialize the PHY from the NVM on ICH platforms.  This
2186 	 * is needed due to an issue where the NVM configuration is
2187 	 * not properly autoloaded after power transitions.
2188 	 * Therefore, after each PHY reset, we will load the
2189 	 * configuration data out of the NVM manually.
2190 	 */
2191 	switch (hw->mac.type) {
2192 	case e1000_ich8lan:
2193 		if (phy->type != e1000_phy_igp_3)
2194 			return ret_val;
2195 
2196 		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2197 		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2198 			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2199 			break;
2200 		}
2201 		/* Fall-thru */
2202 	case e1000_pchlan:
2203 	case e1000_pch2lan:
2204 	case e1000_pch_lpt:
2205 	case e1000_pch_spt:
2206 		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2207 		break;
2208 	default:
2209 		return ret_val;
2210 	}
2211 
2212 	ret_val = hw->phy.ops.acquire(hw);
2213 	if (ret_val)
2214 		return ret_val;
2215 
2216 	data = E1000_READ_REG(hw, E1000_FEXTNVM);
2217 	if (!(data & sw_cfg_mask))
2218 		goto release;
2219 
2220 	/* Make sure HW does not configure LCD from PHY
2221 	 * extended configuration before SW configuration
2222 	 */
2223 	data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2224 	if ((hw->mac.type < e1000_pch2lan) &&
2225 	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2226 		goto release;
2227 
2228 	cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2229 	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2230 	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2231 	if (!cnf_size)
2232 		goto release;
2233 
2234 	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2235 	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2236 
2237 	if (((hw->mac.type == e1000_pchlan) &&
2238 	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2239 	    (hw->mac.type > e1000_pchlan)) {
2240 		/* HW configures the SMBus address and LEDs when the
2241 		 * OEM and LCD Write Enable bits are set in the NVM.
2242 		 * When both NVM bits are cleared, SW will configure
2243 		 * them instead.
2244 		 */
2245 		ret_val = e1000_write_smbus_addr(hw);
2246 		if (ret_val)
2247 			goto release;
2248 
2249 		data = E1000_READ_REG(hw, E1000_LEDCTL);
2250 		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2251 							(u16)data);
2252 		if (ret_val)
2253 			goto release;
2254 	}
2255 
2256 	/* Configure LCD from extended configuration region. */
2257 
2258 	/* cnf_base_addr is in DWORD */
2259 	word_addr = (u16)(cnf_base_addr << 1);
2260 
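	/* Each extended configuration entry is a (data, address) word pair;
	 * page-select entries are latched into phy_page and OR'd into the
	 * register address of subsequent entries.
	 */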
2261 	for (i = 0; i < cnf_size; i++) {
2262 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2263 					   &reg_data);
2264 		if (ret_val)
2265 			goto release;
2266 
2267 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2268 					   1, &reg_addr);
2269 		if (ret_val)
2270 			goto release;
2271 
2272 		/* Save off the PHY page for future writes. */
2273 		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2274 			phy_page = reg_data;
2275 			continue;
2276 		}
2277 
2278 		reg_addr &= PHY_REG_MASK;
2279 		reg_addr |= phy_page;
2280 
2281 		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2282 						    reg_data);
2283 		if (ret_val)
2284 			goto release;
2285 	}
2286 
2287 release:
2288 	hw->phy.ops.release(hw);
2289 	return ret_val;
2290 }
2291 
2292 /**
2293  *  e1000_k1_gig_workaround_hv - K1 Si workaround
2294  *  @hw:   pointer to the HW structure
2295  *  @link: link up bool flag
2296  *
2297  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2298  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
2299  *  If link is down, the function will restore the default K1 setting located
2300  *  in the NVM.
2301  **/
2302 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2303 {
2304 	s32 ret_val = E1000_SUCCESS;
2305 	u16 status_reg = 0;
2306 	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2307 
2308 	DEBUGFUNC("e1000_k1_gig_workaround_hv");
2309 
2310 	if (hw->mac.type != e1000_pchlan)
2311 		return E1000_SUCCESS;
2312 
2313 	/* Wrap the whole flow with the sw flag */
2314 	ret_val = hw->phy.ops.acquire(hw);
2315 	if (ret_val)
2316 		return ret_val;
2317 
2318 	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2319 	if (link) {
2320 		if (hw->phy.type == e1000_phy_82578) {
2321 			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2322 							      &status_reg);
2323 			if (ret_val)
2324 				goto release;
2325 
2326 			status_reg &= (BM_CS_STATUS_LINK_UP |
2327 				       BM_CS_STATUS_RESOLVED |
2328 				       BM_CS_STATUS_SPEED_MASK);
2329 
2330 			if (status_reg == (BM_CS_STATUS_LINK_UP |
2331 					   BM_CS_STATUS_RESOLVED |
2332 					   BM_CS_STATUS_SPEED_1000))
2333 				k1_enable = FALSE;
2334 		}
2335 
2336 		if (hw->phy.type == e1000_phy_82577) {
2337 			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2338 							      &status_reg);
2339 			if (ret_val)
2340 				goto release;
2341 
2342 			status_reg &= (HV_M_STATUS_LINK_UP |
2343 				       HV_M_STATUS_AUTONEG_COMPLETE |
2344 				       HV_M_STATUS_SPEED_MASK);
2345 
2346 			if (status_reg == (HV_M_STATUS_LINK_UP |
2347 					   HV_M_STATUS_AUTONEG_COMPLETE |
2348 					   HV_M_STATUS_SPEED_1000))
2349 				k1_enable = FALSE;
2350 		}
2351 
2352 		/* Link stall fix for link up */
2353 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2354 						       0x0100);
2355 		if (ret_val)
2356 			goto release;
2357 
2358 	} else {
2359 		/* Link stall fix for link down */
2360 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2361 						       0x4100);
2362 		if (ret_val)
2363 			goto release;
2364 	}
2365 
2366 	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2367 
2368 release:
2369 	hw->phy.ops.release(hw);
2370 
2371 	return ret_val;
2372 }
2373 
2374 /**
2375  *  e1000_configure_k1_ich8lan - Configure K1 power state
2376  *  @hw: pointer to the HW structure
2377  *  @enable: K1 state to configure
2378  *
2379  *  Configure the K1 power state based on the provided parameter.
2380  *  Assumes semaphore already acquired.
2381  *
2382  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2383  **/
2384 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2385 {
2386 	s32 ret_val;
2387 	u32 ctrl_reg = 0;
2388 	u32 ctrl_ext = 0;
2389 	u32 reg = 0;
2390 	u16 kmrn_reg = 0;
2391 
2392 	DEBUGFUNC("e1000_configure_k1_ich8lan");
2393 
2394 	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2395 					     &kmrn_reg);
2396 	if (ret_val)
2397 		return ret_val;
2398 
2399 	if (k1_enable)
2400 		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2401 	else
2402 		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2403 
2404 	ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2405 					      kmrn_reg);
2406 	if (ret_val)
2407 		return ret_val;
2408 
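	/* Briefly force the MAC speed with the speed-select bypass set
	 * (presumably so the K1 configuration written above takes effect),
	 * then restore the original CTRL and CTRL_EXT values; the 20us
	 * delays bracket each step of the sequence.
	 */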
2409 	usec_delay(20);
2410 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2411 	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2412 
2413 	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2414 	reg |= E1000_CTRL_FRCSPD;
2415 	E1000_WRITE_REG(hw, E1000_CTRL, reg);
2416 
2417 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2418 	E1000_WRITE_FLUSH(hw);
2419 	usec_delay(20);
2420 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2421 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2422 	E1000_WRITE_FLUSH(hw);
2423 	usec_delay(20);
2424 
2425 	return E1000_SUCCESS;
2426 }
2427 
2428 /**
2429  *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2430  *  @hw:       pointer to the HW structure
2431  *  @d0_state: boolean if entering d0 or d3 device state
2432  *
2433  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2434  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2435  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
2436  **/
2437 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2438 {
2439 	s32 ret_val = 0;
2440 	u32 mac_reg;
2441 	u16 oem_reg;
2442 
2443 	DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2444 
2445 	if (hw->mac.type < e1000_pchlan)
2446 		return ret_val;
2447 
2448 	ret_val = hw->phy.ops.acquire(hw);
2449 	if (ret_val)
2450 		return ret_val;
2451 
2452 	if (hw->mac.type == e1000_pchlan) {
2453 		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2454 		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2455 			goto release;
2456 	}
2457 
2458 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2459 	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2460 		goto release;
2461 
2462 	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2463 
2464 	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2465 	if (ret_val)
2466 		goto release;
2467 
2468 	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2469 
2470 	if (d0_state) {
2471 		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2472 			oem_reg |= HV_OEM_BITS_GBE_DIS;
2473 
2474 		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2475 			oem_reg |= HV_OEM_BITS_LPLU;
2476 	} else {
2477 		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2478 		    E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2479 			oem_reg |= HV_OEM_BITS_GBE_DIS;
2480 
2481 		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2482 		    E1000_PHY_CTRL_NOND0A_LPLU))
2483 			oem_reg |= HV_OEM_BITS_LPLU;
2484 	}
2485 
2486 	/* Set Restart auto-neg to activate the bits */
2487 	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2488 	    !hw->phy.ops.check_reset_block(hw))
2489 		oem_reg |= HV_OEM_BITS_RESTART_AN;
2490 
2491 	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2492 
2493 release:
2494 	hw->phy.ops.release(hw);
2495 
2496 	return ret_val;
2497 }
2498 
2500 /**
2501  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2502  *  @hw:   pointer to the HW structure
2503  **/
2504 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2505 {
2506 	s32 ret_val;
2507 	u16 data;
2508 
2509 	DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2510 
2511 	ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2512 	if (ret_val)
2513 		return ret_val;
2514 
2515 	data |= HV_KMRN_MDIO_SLOW;
2516 
2517 	ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2518 
2519 	return ret_val;
2520 }
2521 
2522 /**
2523  *  e1000_hv_phy_workarounds_ich8lan - A series of PHY workarounds to be
2524  *  done after every PHY reset.
 *  @hw: pointer to the HW structure
2525  **/
2526 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2527 {
2528 	s32 ret_val = E1000_SUCCESS;
2529 	u16 phy_data;
2530 
2531 	DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2532 
2533 	if (hw->mac.type != e1000_pchlan)
2534 		return E1000_SUCCESS;
2535 
2536 	/* Set MDIO slow mode before any other MDIO access */
2537 	if (hw->phy.type == e1000_phy_82577) {
2538 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
2539 		if (ret_val)
2540 			return ret_val;
2541 	}
2542 
2543 	if (((hw->phy.type == e1000_phy_82577) &&
2544 	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2545 	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2546 		/* Disable generation of early preamble */
2547 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2548 		if (ret_val)
2549 			return ret_val;
2550 
2551 		/* Preamble tuning for SSC */
2552 		ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2553 						0xA204);
2554 		if (ret_val)
2555 			return ret_val;
2556 	}
2557 
2558 	if (hw->phy.type == e1000_phy_82578) {
2559 		/* Return registers to default by doing a soft reset then
2560 		 * writing 0x3140 to the control register.
2561 		 */
2562 		if (hw->phy.revision < 2) {
2563 			e1000_phy_sw_reset_generic(hw);
2564 			ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2565 							0x3140);
2566 		}
2567 	}
2568 
2569 	/* Select page 0 */
2570 	ret_val = hw->phy.ops.acquire(hw);
2571 	if (ret_val)
2572 		return ret_val;
2573 
2574 	hw->phy.addr = 1;
2575 	ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2576 	hw->phy.ops.release(hw);
2577 	if (ret_val)
2578 		return ret_val;
2579 
2580 	/* Configure the K1 Si workaround during phy reset assuming there is
2581 	 * link so that it disables K1 if link is in 1Gbps.
2582 	 */
2583 	ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
2584 	if (ret_val)
2585 		return ret_val;
2586 
2587 	/* Workaround for link disconnects on a busy hub in half duplex */
2588 	ret_val = hw->phy.ops.acquire(hw);
2589 	if (ret_val)
2590 		return ret_val;
2591 	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2592 	if (ret_val)
2593 		goto release;
2594 	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2595 					       phy_data & 0x00FF);
2596 	if (ret_val)
2597 		goto release;
2598 
2599 	/* set MSE higher to enable link to stay up when noise is high */
2600 	ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2601 release:
2602 	hw->phy.ops.release(hw);
2603 
2604 	return ret_val;
2605 }
2606 
2607 /**
2608  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2609  *  @hw:   pointer to the HW structure
2610  **/
2611 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2612 {
2613 	u32 mac_reg;
2614 	u16 i, phy_reg = 0;
2615 	s32 ret_val;
2616 
2617 	DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2618 
2619 	ret_val = hw->phy.ops.acquire(hw);
2620 	if (ret_val)
2621 		return;
2622 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2623 	if (ret_val)
2624 		goto release;
2625 
2626 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2627 	for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2628 		mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2629 		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2630 					   (u16)(mac_reg & 0xFFFF));
2631 		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2632 					   (u16)((mac_reg >> 16) & 0xFFFF));
2633 
2634 		mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2635 		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2636 					   (u16)(mac_reg & 0xFFFF));
2637 		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2638 					   (u16)((mac_reg & E1000_RAH_AV)
2639 						 >> 16));
2640 	}
2641 
2642 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2643 
2644 release:
2645 	hw->phy.ops.release(hw);
2646 }
2647 
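/**
 *  e1000_calc_rx_da_crc - Calculate the 802.3 CRC of a destination address
 *  @mac: pointer to the 6-byte MAC address
 *
 *  Computes the reflected CRC-32 (polynomial 0xEDB88320) of the address
 *  bit-serially, one byte at a time; the result seeds the PCH_RAICC
 *  Rx destination-address CRC registers for the jumbo frame workaround
 *  below.
 **/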
2648 static u32 e1000_calc_rx_da_crc(u8 mac[])
2649 {
2650 	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
2651 	u32 i, j, mask, crc;
2652 
2653 	DEBUGFUNC("e1000_calc_rx_da_crc");
2654 
2655 	crc = 0xffffffff;
2656 	for (i = 0; i < 6; i++) {
2657 		crc = crc ^ mac[i];
2658 		for (j = 8; j > 0; j--) {
2659 			mask = (crc & 1) * (-1);
2660 			crc = (crc >> 1) ^ (poly & mask);
2661 		}
2662 	}
2663 	return ~crc;
2664 }
2665 
2666 /**
2667  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2668  *  with 82579 PHY
2669  *  @hw: pointer to the HW structure
2670  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2671  **/
2672 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2673 {
2674 	s32 ret_val = E1000_SUCCESS;
2675 	u16 phy_reg, data;
2676 	u32 mac_reg;
2677 	u16 i;
2678 
2679 	DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2680 
2681 	if (hw->mac.type < e1000_pch2lan)
2682 		return E1000_SUCCESS;
2683 
2684 	/* disable Rx path while enabling/disabling workaround */
2685 	hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2686 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2687 					phy_reg | (1 << 14));
2688 	if (ret_val)
2689 		return ret_val;
2690 
2691 	if (enable) {
2692 		/* Write Rx addresses (rar_entry_count for RAL/H, and
2693 		 * SHRAL/H) and initial CRC values to the MAC
2694 		 */
2695 		for (i = 0; i < hw->mac.rar_entry_count; i++) {
2696 			u8 mac_addr[ETH_ADDR_LEN] = {0};
2697 			u32 addr_high, addr_low;
2698 
2699 			addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2700 			if (!(addr_high & E1000_RAH_AV))
2701 				continue;
2702 			addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2703 			mac_addr[0] = (addr_low & 0xFF);
2704 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
2705 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
2706 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
2707 			mac_addr[4] = (addr_high & 0xFF);
2708 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
2709 
2710 			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2711 					e1000_calc_rx_da_crc(mac_addr));
2712 		}
2713 
2714 		/* Write Rx addresses to the PHY */
2715 		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2716 
2717 		/* Enable jumbo frame workaround in the MAC */
2718 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2719 		mac_reg &= ~(1 << 14);
2720 		mac_reg |= (7 << 15);
2721 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2722 
2723 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2724 		mac_reg |= E1000_RCTL_SECRC;
2725 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2726 
2727 		ret_val = e1000_read_kmrn_reg_generic(hw,
2728 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2729 						&data);
2730 		if (ret_val)
2731 			return ret_val;
2732 		ret_val = e1000_write_kmrn_reg_generic(hw,
2733 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2734 						data | (1 << 0));
2735 		if (ret_val)
2736 			return ret_val;
2737 		ret_val = e1000_read_kmrn_reg_generic(hw,
2738 						E1000_KMRNCTRLSTA_HD_CTRL,
2739 						&data);
2740 		if (ret_val)
2741 			return ret_val;
2742 		data &= ~(0xF << 8);
2743 		data |= (0xB << 8);
2744 		ret_val = e1000_write_kmrn_reg_generic(hw,
2745 						E1000_KMRNCTRLSTA_HD_CTRL,
2746 						data);
2747 		if (ret_val)
2748 			return ret_val;
2749 
2750 		/* Enable jumbo frame workaround in the PHY */
2751 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2752 		data &= ~(0x7F << 5);
2753 		data |= (0x37 << 5);
2754 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2755 		if (ret_val)
2756 			return ret_val;
2757 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2758 		data &= ~(1 << 13);
2759 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2760 		if (ret_val)
2761 			return ret_val;
2762 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2763 		data &= ~(0x3FF << 2);
2764 		data |= (E1000_TX_PTR_GAP << 2);
2765 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2766 		if (ret_val)
2767 			return ret_val;
2768 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2769 		if (ret_val)
2770 			return ret_val;
2771 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2772 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2773 						(1 << 10));
2774 		if (ret_val)
2775 			return ret_val;
2776 	} else {
2777 		/* Write MAC register values back to h/w defaults */
2778 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2779 		mac_reg &= ~(0xF << 14);
2780 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2781 
2782 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2783 		mac_reg &= ~E1000_RCTL_SECRC;
2784 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2785 
2786 		ret_val = e1000_read_kmrn_reg_generic(hw,
2787 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2788 						&data);
2789 		if (ret_val)
2790 			return ret_val;
2791 		ret_val = e1000_write_kmrn_reg_generic(hw,
2792 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2793 						data & ~(1 << 0));
2794 		if (ret_val)
2795 			return ret_val;
2796 		ret_val = e1000_read_kmrn_reg_generic(hw,
2797 						E1000_KMRNCTRLSTA_HD_CTRL,
2798 						&data);
2799 		if (ret_val)
2800 			return ret_val;
2801 		data &= ~(0xF << 8);
2802 		data |= (0xB << 8);
2803 		ret_val = e1000_write_kmrn_reg_generic(hw,
2804 						E1000_KMRNCTRLSTA_HD_CTRL,
2805 						data);
2806 		if (ret_val)
2807 			return ret_val;
2808 
2809 		/* Write PHY register values back to h/w defaults */
2810 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2811 		data &= ~(0x7F << 5);
2812 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2813 		if (ret_val)
2814 			return ret_val;
2815 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2816 		data |= (1 << 13);
2817 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2818 		if (ret_val)
2819 			return ret_val;
2820 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2821 		data &= ~(0x3FF << 2);
2822 		data |= (0x8 << 2);
2823 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2824 		if (ret_val)
2825 			return ret_val;
2826 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2827 		if (ret_val)
2828 			return ret_val;
2829 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2830 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2831 						~(1 << 10));
2832 		if (ret_val)
2833 			return ret_val;
2834 	}
2835 
2836 	/* re-enable Rx path after enabling/disabling workaround */
2837 	return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2838 				     ~(1 << 14));
2839 }
2840 
2841 /**
2842  *  e1000_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
2843  *  done after every PHY reset.
 *  @hw: pointer to the HW structure
2844  **/
2845 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2846 {
2847 	s32 ret_val = E1000_SUCCESS;
2848 
2849 	DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2850 
2851 	if (hw->mac.type != e1000_pch2lan)
2852 		return E1000_SUCCESS;
2853 
2854 	/* Set MDIO slow mode before any other MDIO access */
2855 	ret_val = e1000_set_mdio_slow_mode_hv(hw);
2856 	if (ret_val)
2857 		return ret_val;
2858 
2859 	ret_val = hw->phy.ops.acquire(hw);
2860 	if (ret_val)
2861 		return ret_val;
2862 	/* set MSE higher to enable link to stay up when noise is high */
2863 	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2864 	if (ret_val)
2865 		goto release;
2866 	/* drop link after 5 times MSE threshold was reached */
2867 	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2868 release:
2869 	hw->phy.ops.release(hw);
2870 
2871 	return ret_val;
2872 }
2873 
2874 /**
2875  *  e1000_k1_workaround_lv - K1 Si workaround
2876  *  @hw:   pointer to the HW structure
2877  *
2878  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
2879  *  Disable K1 for 1000 and 100 speeds.
2880  **/
2881 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2882 {
2883 	s32 ret_val = E1000_SUCCESS;
2884 	u16 status_reg = 0;
2885 
2886 	DEBUGFUNC("e1000_k1_workaround_lv");
2887 
2888 	if (hw->mac.type != e1000_pch2lan)
2889 		return E1000_SUCCESS;
2890 
2891 	/* Set K1 beacon duration based on 10Mbps speed */
2892 	ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2893 	if (ret_val)
2894 		return ret_val;
2895 
2896 	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2897 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2898 		if (status_reg &
2899 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2900 			u16 pm_phy_reg;
2901 
2902 			/* LV 1G/100 packet drop issue workaround */
2903 			ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2904 						       &pm_phy_reg);
2905 			if (ret_val)
2906 				return ret_val;
2907 			pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2908 			ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
2909 							pm_phy_reg);
2910 			if (ret_val)
2911 				return ret_val;
2912 		} else {
2913 			u32 mac_reg;
2914 			mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
2915 			mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2916 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2917 			E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2918 		}
2919 	}
2920 
2921 	return ret_val;
2922 }
2923 
2924 /**
2925  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2926  *  @hw:   pointer to the HW structure
2927  *  @gate: boolean set to TRUE to gate, FALSE to ungate
2928  *
2929  *  Gate/ungate the automatic PHY configuration via hardware; perform
2930  *  the configuration via software instead.
2931  **/
2932 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2933 {
2934 	u32 extcnf_ctrl;
2935 
2936 	DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
2937 
2938 	if (hw->mac.type < e1000_pch2lan)
2939 		return;
2940 
2941 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2942 
2943 	if (gate)
2944 		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2945 	else
2946 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2947 
2948 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
2949 }
2950 
2951 /**
2952  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
2953  *  @hw: pointer to the HW structure
2954  *
2955  *  Check the appropriate indication the MAC has finished configuring the
2956  *  PHY after a software reset.
2957  **/
2958 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2959 {
2960 	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2961 
2962 	DEBUGFUNC("e1000_lan_init_done_ich8lan");
2963 
2964 	/* Wait for basic configuration to complete before proceeding */
2965 	do {
2966 		data = E1000_READ_REG(hw, E1000_STATUS);
2967 		data &= E1000_STATUS_LAN_INIT_DONE;
2968 		usec_delay(100);
2969 	} while ((!data) && --loop);
2970 
2971 	/* If basic configuration is incomplete before the above loop
2972 	 * count reaches 0, loading the configuration from NVM will
2973 	 * leave the PHY in a bad state possibly resulting in no link.
2974 	 */
2975 	if (loop == 0)
2976 		DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
2977 
2978 	/* Clear the Init Done bit for the next init event */
2979 	data = E1000_READ_REG(hw, E1000_STATUS);
2980 	data &= ~E1000_STATUS_LAN_INIT_DONE;
2981 	E1000_WRITE_REG(hw, E1000_STATUS, data);
2982 }
2983 
2984 /**
2985  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2986  *  @hw: pointer to the HW structure
2987  **/
2988 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2989 {
2990 	s32 ret_val = E1000_SUCCESS;
2991 	u16 reg;
2992 
2993 	DEBUGFUNC("e1000_post_phy_reset_ich8lan");
2994 
2995 	if (hw->phy.ops.check_reset_block(hw))
2996 		return E1000_SUCCESS;
2997 
2998 	/* Allow time for h/w to get to quiescent state after reset */
2999 	msec_delay(10);
3000 
3001 	/* Perform any necessary post-reset workarounds */
3002 	switch (hw->mac.type) {
3003 	case e1000_pchlan:
3004 		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
3005 		if (ret_val)
3006 			return ret_val;
3007 		break;
3008 	case e1000_pch2lan:
3009 		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
3010 		if (ret_val)
3011 			return ret_val;
3012 		break;
3013 	default:
3014 		break;
3015 	}
3016 
3017 	/* Clear the host wakeup bit after lcd reset */
3018 	if (hw->mac.type >= e1000_pchlan) {
3019 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
3020 		reg &= ~BM_WUC_HOST_WU_BIT;
3021 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
3022 	}
3023 
3024 	/* Configure the LCD with the extended configuration region in NVM */
3025 	ret_val = e1000_sw_lcd_config_ich8lan(hw);
3026 	if (ret_val)
3027 		return ret_val;
3028 
3029 	/* Configure the LCD with the OEM bits in NVM */
3030 	ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
3031 
3032 	if (hw->mac.type == e1000_pch2lan) {
3033 		/* Ungate automatic PHY configuration on non-managed 82579 */
3034 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
3035 		    E1000_ICH_FWSM_FW_VALID)) {
3036 			msec_delay(10);
3037 			e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
3038 		}
3039 
3040 		/* Set EEE LPI Update Timer to 200usec */
3041 		ret_val = hw->phy.ops.acquire(hw);
3042 		if (ret_val)
3043 			return ret_val;
3044 		ret_val = e1000_write_emi_reg_locked(hw,
3045 						     I82579_LPI_UPDATE_TIMER,
3046 						     0x1387);
3047 		hw->phy.ops.release(hw);
3048 	}
3049 
3050 	return ret_val;
3051 }
3052 
3053 /**
3054  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
3055  *  @hw: pointer to the HW structure
3056  *
3057  *  Resets the PHY
3058  *  This is a function pointer entry point called by drivers
3059  *  or other shared routines.
3060  **/
3061 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
3062 {
3063 	s32 ret_val = E1000_SUCCESS;
3064 
3065 	DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
3066 
3067 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
3068 	if ((hw->mac.type == e1000_pch2lan) &&
3069 	    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3070 		e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3071 
3072 	ret_val = e1000_phy_hw_reset_generic(hw);
3073 	if (ret_val)
3074 		return ret_val;
3075 
3076 	return e1000_post_phy_reset_ich8lan(hw);
3077 }
3078 
3079 /**
3080  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
3081  *  @hw: pointer to the HW structure
3082  *  @active: TRUE to enable LPLU, FALSE to disable
3083  *
3084  *  Sets the LPLU state according to the active flag.  For PCH, if the OEM
3085  *  write bit is disabled in the NVM, writing the LPLU bits in the MAC will
3086  *  not set the PHY speed. This function will manually set the LPLU bit and
3087  *  restart auto-neg as hw would do. D3 and D0 LPLU will call the same
3088  *  function since it configures the same bit.
3089  **/
3090 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
3091 {
3092 	s32 ret_val;
3093 	u16 oem_reg;
3094 
3095 	DEBUGFUNC("e1000_set_lplu_state_pchlan");
3096 
3097 	ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
3098 	if (ret_val)
3099 		return ret_val;
3100 
3101 	if (active)
3102 		oem_reg |= HV_OEM_BITS_LPLU;
3103 	else
3104 		oem_reg &= ~HV_OEM_BITS_LPLU;
3105 
3106 	if (!hw->phy.ops.check_reset_block(hw))
3107 		oem_reg |= HV_OEM_BITS_RESTART_AN;
3108 
3109 	return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
3110 }
3111 
3112 /**
3113  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
3114  *  @hw: pointer to the HW structure
3115  *  @active: TRUE to enable LPLU, FALSE to disable
3116  *
3117  *  Sets the LPLU D0 state according to the active flag.  When
3118  *  activating LPLU this function also disables smart speed
3119  *  and vice versa.  LPLU will not be activated unless the
3120  *  device autonegotiation advertisement meets standards of
3121  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3122  *  This is a function pointer entry point only called by
3123  *  PHY setup routines.
3124  **/
3125 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3126 {
3127 	struct e1000_phy_info *phy = &hw->phy;
3128 	u32 phy_ctrl;
3129 	s32 ret_val = E1000_SUCCESS;
3130 	u16 data;
3131 
3132 	DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3133 
3134 	if (phy->type == e1000_phy_ife)
3135 		return E1000_SUCCESS;
3136 
3137 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3138 
3139 	if (active) {
3140 		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3141 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3142 
3143 		if (phy->type != e1000_phy_igp_3)
3144 			return E1000_SUCCESS;
3145 
3146 		/* Call gig speed drop workaround on LPLU before accessing
3147 		 * any PHY registers
3148 		 */
3149 		if (hw->mac.type == e1000_ich8lan)
3150 			e1000_gig_downshift_workaround_ich8lan(hw);
3151 
3152 		/* When LPLU is enabled, we should disable SmartSpeed */
3153 		ret_val = phy->ops.read_reg(hw,
3154 					    IGP01E1000_PHY_PORT_CONFIG,
3155 					    &data);
3156 		if (ret_val)
3157 			return ret_val;
3158 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3159 		ret_val = phy->ops.write_reg(hw,
3160 					     IGP01E1000_PHY_PORT_CONFIG,
3161 					     data);
3162 		if (ret_val)
3163 			return ret_val;
3164 	} else {
3165 		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3166 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3167 
3168 		if (phy->type != e1000_phy_igp_3)
3169 			return E1000_SUCCESS;
3170 
3171 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3172 		 * during Dx states where the power conservation is most
3173 		 * important.  During driver activity we should enable
3174 		 * SmartSpeed, so performance is maintained.
3175 		 */
3176 		if (phy->smart_speed == e1000_smart_speed_on) {
3177 			ret_val = phy->ops.read_reg(hw,
3178 						    IGP01E1000_PHY_PORT_CONFIG,
3179 						    &data);
3180 			if (ret_val)
3181 				return ret_val;
3182 
3183 			data |= IGP01E1000_PSCFR_SMART_SPEED;
3184 			ret_val = phy->ops.write_reg(hw,
3185 						     IGP01E1000_PHY_PORT_CONFIG,
3186 						     data);
3187 			if (ret_val)
3188 				return ret_val;
3189 		} else if (phy->smart_speed == e1000_smart_speed_off) {
3190 			ret_val = phy->ops.read_reg(hw,
3191 						    IGP01E1000_PHY_PORT_CONFIG,
3192 						    &data);
3193 			if (ret_val)
3194 				return ret_val;
3195 
3196 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3197 			ret_val = phy->ops.write_reg(hw,
3198 						     IGP01E1000_PHY_PORT_CONFIG,
3199 						     data);
3200 			if (ret_val)
3201 				return ret_val;
3202 		}
3203 	}
3204 
3205 	return E1000_SUCCESS;
3206 }
3207 
3208 /**
3209  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3210  *  @hw: pointer to the HW structure
3211  *  @active: TRUE to enable LPLU, FALSE to disable
3212  *
3213  *  Sets the LPLU D3 state according to the active flag.  When
3214  *  activating LPLU this function also disables smart speed
3215  *  and vice versa.  LPLU will not be activated unless the
3216  *  device autonegotiation advertisement meets standards of
3217  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3218  *  This is a function pointer entry point only called by
3219  *  PHY setup routines.
3220  **/
3221 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3222 {
3223 	struct e1000_phy_info *phy = &hw->phy;
3224 	u32 phy_ctrl;
3225 	s32 ret_val = E1000_SUCCESS;
3226 	u16 data;
3227 
3228 	DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3229 
3230 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3231 
3232 	if (!active) {
3233 		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3234 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3235 
3236 		if (phy->type != e1000_phy_igp_3)
3237 			return E1000_SUCCESS;
3238 
3239 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3240 		 * during Dx states where the power conservation is most
3241 		 * important.  During driver activity we should enable
3242 		 * SmartSpeed, so performance is maintained.
3243 		 */
3244 		if (phy->smart_speed == e1000_smart_speed_on) {
3245 			ret_val = phy->ops.read_reg(hw,
3246 						    IGP01E1000_PHY_PORT_CONFIG,
3247 						    &data);
3248 			if (ret_val)
3249 				return ret_val;
3250 
3251 			data |= IGP01E1000_PSCFR_SMART_SPEED;
3252 			ret_val = phy->ops.write_reg(hw,
3253 						     IGP01E1000_PHY_PORT_CONFIG,
3254 						     data);
3255 			if (ret_val)
3256 				return ret_val;
3257 		} else if (phy->smart_speed == e1000_smart_speed_off) {
3258 			ret_val = phy->ops.read_reg(hw,
3259 						    IGP01E1000_PHY_PORT_CONFIG,
3260 						    &data);
3261 			if (ret_val)
3262 				return ret_val;
3263 
3264 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3265 			ret_val = phy->ops.write_reg(hw,
3266 						     IGP01E1000_PHY_PORT_CONFIG,
3267 						     data);
3268 			if (ret_val)
3269 				return ret_val;
3270 		}
3271 	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3272 		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3273 		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3274 		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3275 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3276 
3277 		if (phy->type != e1000_phy_igp_3)
3278 			return E1000_SUCCESS;
3279 
3280 		/* Call gig speed drop workaround on LPLU before accessing
3281 		 * any PHY registers
3282 		 */
3283 		if (hw->mac.type == e1000_ich8lan)
3284 			e1000_gig_downshift_workaround_ich8lan(hw);
3285 
3286 		/* When LPLU is enabled, we should disable SmartSpeed */
3287 		ret_val = phy->ops.read_reg(hw,
3288 					    IGP01E1000_PHY_PORT_CONFIG,
3289 					    &data);
3290 		if (ret_val)
3291 			return ret_val;
3292 
3293 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3294 		ret_val = phy->ops.write_reg(hw,
3295 					     IGP01E1000_PHY_PORT_CONFIG,
3296 					     data);
3297 	}
3298 
3299 	return ret_val;
3300 }
3301 
3302 /**
3303  *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3304  *  @hw: pointer to the HW structure
3305  *  @bank:  pointer to the variable that returns the active bank
3306  *
3307  *  Reads signature byte from the NVM using the flash access registers.
3308  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3309  **/
3310 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3311 {
3312 	u32 eecd;
3313 	struct e1000_nvm_info *nvm = &hw->nvm;
3314 	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3315 	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3316 	u8 sig_byte = 0;
3317 	s32 ret_val;
3318 
3319 	DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3320 
3321 	switch (hw->mac.type) {
3322 	case e1000_pch_spt:
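		/* On SPT the valid NVM bank is reported directly in
		 * CTRL_EXT.NVMVS: 0 and 1 mean no valid bank is present,
		 * while 2 and 3 select bank 0 and bank 1 respectively.
		 */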
3323 		*bank = E1000_READ_REG(hw, E1000_CTRL_EXT) & E1000_CTRL_EXT_NVMVS;
3324 		if (*bank == 0 || *bank == 1) {
3325 			return -E1000_ERR_NVM;
3326 		} else {
3327 			*bank = *bank - 2;
3328 			return E1000_SUCCESS;
3329 		}
3330 		break;
3331 	case e1000_ich8lan:
3332 	case e1000_ich9lan:
3333 		eecd = E1000_READ_REG(hw, E1000_EECD);
3334 		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3335 		    E1000_EECD_SEC1VAL_VALID_MASK) {
3336 			if (eecd & E1000_EECD_SEC1VAL)
3337 				*bank = 1;
3338 			else
3339 				*bank = 0;
3340 
3341 			return E1000_SUCCESS;
3342 		}
3343 		DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3344 		/* fall-thru */
3345 	default:
3346 		/* set bank to 0 in case flash read fails */
3347 		*bank = 0;
3348 
3349 		/* Check bank 0 */
3350 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3351 							&sig_byte);
3352 		if (ret_val)
3353 			return ret_val;
3354 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3355 		    E1000_ICH_NVM_SIG_VALUE) {
3356 			*bank = 0;
3357 			return E1000_SUCCESS;
3358 		}
3359 
3360 		/* Check bank 1 */
3361 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3362 							bank1_offset,
3363 							&sig_byte);
3364 		if (ret_val)
3365 			return ret_val;
3366 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3367 		    E1000_ICH_NVM_SIG_VALUE) {
3368 			*bank = 1;
3369 			return E1000_SUCCESS;
3370 		}
3371 
3372 		DEBUGOUT("ERROR: No valid NVM bank present\n");
3373 		return -E1000_ERR_NVM;
3374 	}
3375 }
3376 
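/* Illustrative arithmetic for the signature probe above, assuming the
 * layout described in the function comment (word 0x13, bits 15:14):
 * the high byte of word 0x13 sits at byte offset 0x13 * 2 + 1 = 0x27
 * within a bank, and the second bank starts flash_bank_size words in:
 *
 *	act_offset   = E1000_ICH_NVM_SIG_WORD * 2 + 1;		// 0x27
 *	bank1_offset = nvm->flash_bank_size * sizeof(u16);	// bank stride in bytes
 *	// a bank is valid iff (sig_byte & E1000_ICH_NVM_VALID_SIG_MASK)
 *	//                  == E1000_ICH_NVM_SIG_VALUE, i.e. bits 7:6 == 10b
 */
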
3377 /**
3378  *  e1000_read_nvm_spt - Read word(s) from the NVM
3379  *  @hw: pointer to the HW structure
3380  *  @offset: The offset (in words) of the word(s) to read.
3381  *  @words: Size of data to read in words
3382  *  @data: Pointer to the word(s) to read at offset.
3383  *
3384  *  Reads a word(s) from the NVM using the flash access registers.
3385  **/
3386 static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
3387 			      u16 *data)
3388 {
3389 	struct e1000_nvm_info *nvm = &hw->nvm;
3390 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3391 	u32 act_offset;
3392 	s32 ret_val = E1000_SUCCESS;
3393 	u32 bank = 0;
3394 	u32 dword;
3395 	u16 use_offset;
3396 	u16 i;
3397 
3398 	DEBUGFUNC("e1000_read_nvm_spt");
3399 
3400 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3401 	    (words == 0)) {
3402 		DEBUGOUT("nvm parameter(s) out of bounds\n");
3403 		ret_val = -E1000_ERR_NVM;
3404 		goto out;
3405 	}
3406 
3407 	nvm->ops.acquire(hw);
3408 
3409 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3410 	if (ret_val != E1000_SUCCESS) {
3411 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3412 		bank = 0;
3413 	}
3414 
3415 	act_offset = (bank) ? nvm->flash_bank_size : 0;
3416 	act_offset += offset;
3417 
3418 	ret_val = E1000_SUCCESS;
3419 	for (i = 0; i < words; i += 2) {
3420 		if (words - i == 1) {
3421 			if (dev_spec->shadow_ram[offset+i].modified) {
3422 				data[i] = dev_spec->shadow_ram[offset+i].value;
3423 			} else {
3424 				use_offset = act_offset + i -
3425 					     (act_offset + i) % 2;
3426 				ret_val = e1000_read_flash_dword_ich8lan(
3427 							hw,
3428 							use_offset,
3429 							&dword);
3430 				if (ret_val)
3431 					break;
3432 				if ((act_offset + i) % 2 == 0)
3433 					data[i] = (u16)(dword & 0xFFFF);
3434 				else
3435 					data[i] = (u16)((dword >> 16) & 0xFFFF);
3436 			}
3437 		} else {
3438 			use_offset = act_offset + i;
3439 			dword = 0;	/* avoid gcc warnings */
3440 			if (!(dev_spec->shadow_ram[offset + i].modified) ||
3441 			    !(dev_spec->shadow_ram[offset + i + 1].modified)) {
3442 				ret_val =
3443 				 e1000_read_flash_dword_ich8lan(hw,
3444 					use_offset, &dword);
3445 				if (ret_val)
3446 					break;
3447 			}
3448 			if (dev_spec->shadow_ram[offset + i].modified)
3449 				data[i] = dev_spec->shadow_ram[offset + i].value;
3450 			else
3451 				data[i] = (u16)(dword & 0xFFFF);
3452 			if (dev_spec->shadow_ram[offset + i + 1].modified)
3453 				data[i + 1] =
3454 				    dev_spec->shadow_ram[offset + i + 1].value;
3455 			else
3456 				data[i + 1] = (u16)((dword >> 16) & 0xFFFF);
3457 		}
3458 	}
3459 
3460 	nvm->ops.release(hw);
3461 
3462 out:
3463 	if (ret_val)
3464 		DEBUGOUT1("NVM read error: %d\n", ret_val);
3465 
3466 	return ret_val;
3467 }
3468 
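/* A minimal sketch of the even/odd unpacking used above: the SPT flash
 * interface returns 32 bits per access, and each 16-bit NVM word is the
 * low or high half of that dword depending on the parity of its word
 * offset (the dword value below is hypothetical):
 *
 *	u32 dword = 0xBEEF1234;				// dword read from flash
 *	u16 even  = (u16)(dword & 0xFFFF);		// word at even offset: 0x1234
 *	u16 odd   = (u16)((dword >> 16) & 0xFFFF);	// word at odd offset:  0xBEEF
 */
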
3469 /**
3470  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3471  *  @hw: pointer to the HW structure
3472  *  @offset: The offset (in words) of the word(s) to read.
3473  *  @words: Size of data to read in words
3474  *  @data: Pointer to the word(s) to read at offset.
3475  *
3476  *  Reads a word(s) from the NVM using the flash access registers.
3477  **/
3478 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3479 				  u16 *data)
3480 {
3481 	struct e1000_nvm_info *nvm = &hw->nvm;
3482 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3483 	u32 act_offset;
3484 	s32 ret_val = E1000_SUCCESS;
3485 	u32 bank = 0;
3486 	u16 i, word;
3487 
3488 	DEBUGFUNC("e1000_read_nvm_ich8lan");
3489 
3490 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3491 	    (words == 0)) {
3492 		DEBUGOUT("nvm parameter(s) out of bounds\n");
3493 		ret_val = -E1000_ERR_NVM;
3494 		goto out;
3495 	}
3496 
3497 	nvm->ops.acquire(hw);
3498 
3499 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3500 	if (ret_val != E1000_SUCCESS) {
3501 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3502 		bank = 0;
3503 	}
3504 
3505 	act_offset = (bank) ? nvm->flash_bank_size : 0;
3506 	act_offset += offset;
3507 
3508 	ret_val = E1000_SUCCESS;
3509 	for (i = 0; i < words; i++) {
3510 		if (dev_spec->shadow_ram[offset+i].modified) {
3511 			data[i] = dev_spec->shadow_ram[offset+i].value;
3512 		} else {
3513 			ret_val = e1000_read_flash_word_ich8lan(hw,
3514 								act_offset + i,
3515 								&word);
3516 			if (ret_val)
3517 				break;
3518 			data[i] = word;
3519 		}
3520 	}
3521 
3522 	nvm->ops.release(hw);
3523 
3524 out:
3525 	if (ret_val)
3526 		DEBUGOUT1("NVM read error: %d\n", ret_val);
3527 
3528 	return ret_val;
3529 }
3530 
3531 /**
3532  *  e1000_flash_cycle_init_ich8lan - Initialize flash
3533  *  @hw: pointer to the HW structure
3534  *
3535  *  This function does initial flash setup so that a new read/write/erase cycle
3536  *  can be started.
3537  **/
3538 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3539 {
3540 	union ich8_hws_flash_status hsfsts;
3541 	s32 ret_val = -E1000_ERR_NVM;
3542 
3543 	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3544 
3545 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3546 
3547 	/* Check if the flash descriptor is valid */
3548 	if (!hsfsts.hsf_status.fldesvalid) {
3549 		DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
3550 		return -E1000_ERR_NVM;
3551 	}
3552 
3553 	/* Clear FCERR and DAEL in hw status by writing 1 */
3554 	hsfsts.hsf_status.flcerr = 1;
3555 	hsfsts.hsf_status.dael = 1;
3556 	if (hw->mac.type == e1000_pch_spt)
3557 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
3558 	else
3559 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3560 
3561 	/* Either the hardware should provide a SPI cycle-in-progress
3562 	 * bit to check against before a new cycle is started, or the
3563 	 * FDONE bit should be set by the hardware so that it
3564 	 * is 1 after a hardware reset, which can then be used to
3565 	 * indicate whether a cycle is in progress or has been
3566 	 * completed.
3567 	 */
3568 
3569 	if (!hsfsts.hsf_status.flcinprog) {
3570 		/* There is no cycle running at present,
3571 		 * so we can start a cycle.
3572 		 * Begin by setting Flash Cycle Done.
3573 		 */
3574 		hsfsts.hsf_status.flcdone = 1;
3575 		if (hw->mac.type == e1000_pch_spt)
3576 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
3577 		else
3578 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3579 		ret_val = E1000_SUCCESS;
3580 	} else {
3581 		s32 i;
3582 
3583 		/* Otherwise poll for some time so the current
3584 		 * cycle has a chance to end before giving up.
3585 		 */
3586 		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3587 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3588 							      ICH_FLASH_HSFSTS);
3589 			if (!hsfsts.hsf_status.flcinprog) {
3590 				ret_val = E1000_SUCCESS;
3591 				break;
3592 			}
3593 			usec_delay(1);
3594 		}
3595 		if (ret_val == E1000_SUCCESS) {
3596 			/* The previous cycle completed within the timeout;
3597 			 * now set the Flash Cycle Done.
3598 			 */
3599 			hsfsts.hsf_status.flcdone = 1;
3600 			if (hw->mac.type == e1000_pch_spt)
3601 				E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3602 							hsfsts.regval & 0xFFFF);
3603 			else
3604 				E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3605 							hsfsts.regval);
3606 		} else {
3607 			DEBUGOUT("Flash controller busy, cannot get access\n");
3608 		}
3609 	}
3610 
3611 	return ret_val;
3612 }
3613 
3614 /**
3615  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3616  *  @hw: pointer to the HW structure
3617  *  @timeout: maximum time to wait for completion
3618  *
3619  *  This function starts a flash cycle and waits for its completion.
3620  **/
3621 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3622 {
3623 	union ich8_hws_flash_ctrl hsflctl;
3624 	union ich8_hws_flash_status hsfsts;
3625 	u32 i = 0;
3626 
3627 	DEBUGFUNC("e1000_flash_cycle_ich8lan");
3628 
3629 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3630 	if (hw->mac.type == e1000_pch_spt)
3631 		hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS) >> 16;
3632 	else
3633 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3634 	hsflctl.hsf_ctrl.flcgo = 1;
3635 
3636 	if (hw->mac.type == e1000_pch_spt)
3637 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, hsflctl.regval << 16);
3638 	else
3639 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3640 
3641 	/* wait till FDONE bit is set to 1 */
3642 	do {
3643 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3644 		if (hsfsts.hsf_status.flcdone)
3645 			break;
3646 		usec_delay(1);
3647 	} while (i++ < timeout);
3648 
3649 	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3650 		return E1000_SUCCESS;
3651 
3652 	return -E1000_ERR_NVM;
3653 }
3654 
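/* The two helpers above always bracket the same sequence.  A condensed
 * sketch of one software-sequenced read, error handling elided (see
 * e1000_read_flash_data_ich8lan() below for the real retry loop):
 *
 *	e1000_flash_cycle_init_ich8lan(hw);		// clear FCERR/DAEL, wait on FLCINPROG
 *	hsflctl.hsf_ctrl.fldbcount = size - 1;		// transfer size in bytes - 1
 *	hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;	// cycle type
 *	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
 *	E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
 *	e1000_flash_cycle_ich8lan(hw, ICH_FLASH_READ_COMMAND_TIMEOUT);
 *	flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
 */
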
3655 /**
3656  *  e1000_read_flash_word_ich8lan - Read word from flash
3657  *  @hw: pointer to the HW structure
3658  *  @offset: offset to data location
3659  *  @data: pointer to the location for storing the data
3660  *
3661  *  Reads the flash word at offset into data.  Offset is converted
3662  *  to bytes before read.
3663  **/
3664 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3665 					 u16 *data)
3666 {
3667 	DEBUGFUNC("e1000_read_flash_word_ich8lan");
3668 
3669 	if (!data)
3670 		return -E1000_ERR_NVM;
3671 
3672 	/* Must convert offset into bytes. */
3673 	offset <<= 1;
3674 
3675 	return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3676 }
3677 
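/* Note the word-to-byte conversion above: callers pass word offsets,
 * while the flash cycle takes byte addresses, e.g.:
 *
 *	byte_offset = word_offset << 1;		// word 0x13 -> byte 0x26
 */
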
3678 /**
3679  *  e1000_read_flash_dword_ich8lan - Read dword from flash
3680  *  @hw: pointer to the HW structure
3681  *  @offset: offset to data location
3682  *  @data: pointer to the location for storing the data
3683  *
3684  *  Reads the flash dword at offset into data.  Offset is converted
3685  *  to bytes before read.
3686  **/
3687 static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
3688 					 u32 *data)
3689 {
3690 	DEBUGFUNC("e1000_read_flash_dword_ich8lan");
3691 
3692 	if (!data)
3693 		return -E1000_ERR_NVM;
3694 
3695 	/* Must convert offset into bytes. */
3696 	offset <<= 1;
3697 
3698 	return e1000_read_flash_data32_ich8lan(hw, offset, data);
3699 }
3700 
3701 /**
3702  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3703  *  @hw: pointer to the HW structure
3704  *  @offset: The offset of the byte to read.
3705  *  @data: Pointer to a byte to store the value read.
3706  *
3707  *  Reads a single byte from the NVM using the flash access registers.
3708  **/
3709 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3710 					 u8 *data)
3711 {
3712 	s32 ret_val;
3713 	u16 word = 0;
3714 
3715 	if (hw->mac.type == e1000_pch_spt)
3716 		return -E1000_ERR_NVM;
3717 	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3718 
3719 	if (ret_val)
3720 		return ret_val;
3721 
3722 	*data = (u8)word;
3723 
3724 	return E1000_SUCCESS;
3725 }
3726 
3727 /**
3728  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3729  *  @hw: pointer to the HW structure
3730  *  @offset: The offset (in bytes) of the byte or word to read.
3731  *  @size: Size of data to read, 1=byte 2=word
3732  *  @data: Pointer to the word to store the value read.
3733  *
3734  *  Reads a byte or word from the NVM using the flash access registers.
3735  **/
3736 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3737 					 u8 size, u16 *data)
3738 {
3739 	union ich8_hws_flash_status hsfsts;
3740 	union ich8_hws_flash_ctrl hsflctl;
3741 	u32 flash_linear_addr;
3742 	u32 flash_data = 0;
3743 	s32 ret_val = -E1000_ERR_NVM;
3744 	u8 count = 0;
3745 
3746 	DEBUGFUNC("e1000_read_flash_data_ich8lan");
3747 
3748 	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3749 		return -E1000_ERR_NVM;
3750 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3751 			     hw->nvm.flash_base_addr);
3752 
3753 	do {
3754 		usec_delay(1);
3755 		/* Steps */
3756 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3757 		if (ret_val != E1000_SUCCESS)
3758 			break;
3759 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3760 
3761 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3762 		hsflctl.hsf_ctrl.fldbcount = size - 1;
3763 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3764 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3765 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3766 
3767 		ret_val = e1000_flash_cycle_ich8lan(hw,
3768 						ICH_FLASH_READ_COMMAND_TIMEOUT);
3769 
3770 		/* If FCERR is set, clear it and try the whole
3771 		 * sequence a few more times; otherwise read the
3772 		 * result out of Flash Data0, least significant
3773 		 * byte first.
3774 		 */
3775 		if (ret_val == E1000_SUCCESS) {
3776 			flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3777 			if (size == 1)
3778 				*data = (u8)(flash_data & 0x000000FF);
3779 			else if (size == 2)
3780 				*data = (u16)(flash_data & 0x0000FFFF);
3781 			break;
3782 		} else {
3783 			/* If we've gotten here, then things are probably
3784 			 * completely hosed, but if the error condition is
3785 			 * detected, it won't hurt to give it another try...
3786 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3787 			 */
3788 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3789 							      ICH_FLASH_HSFSTS);
3790 			if (hsfsts.hsf_status.flcerr) {
3791 				/* Repeat for some time before giving up. */
3792 				continue;
3793 			} else if (!hsfsts.hsf_status.flcdone) {
3794 				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3795 				break;
3796 			}
3797 		}
3798 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3799 
3800 	return ret_val;
3801 }
3802 
3803 /**
3804  *  e1000_read_flash_data32_ich8lan - Read dword from NVM
3805  *  @hw: pointer to the HW structure
3806  *  @offset: The offset (in bytes) of the dword to read.
3808  *  @data: Pointer to the dword to store the value read.
3809  *
3810  *  Reads a dword from the NVM using the flash access registers.
3811  **/
3812 static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
3813 					 u32 *data)
3814 {
3815 	union ich8_hws_flash_status hsfsts;
3816 	union ich8_hws_flash_ctrl hsflctl;
3817 	u32 flash_linear_addr;
3818 	s32 ret_val = -E1000_ERR_NVM;
3819 	u8 count = 0;
3820 
3821 	DEBUGFUNC("e1000_read_flash_data32_ich8lan");
3822 
3823 	*data = 0;	/* avoid gcc warning */
3824 
3825 	if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
3826 	    hw->mac.type != e1000_pch_spt)
3827 		return -E1000_ERR_NVM;
3828 
3829 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3830 			     hw->nvm.flash_base_addr);
3831 
3832 	do {
3833 		usec_delay(1);
3834 		/* Steps */
3835 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3836 		if (ret_val != E1000_SUCCESS)
3837 			break;
3838 		hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS) >> 16;
3839 
3840 		/* fldbcount is byte count - 1, so 3 selects a 4-byte (dword) transfer. */
3841 		hsflctl.hsf_ctrl.fldbcount = sizeof(int32_t) - 1;
3842 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3843 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, hsflctl.regval << 16);
3844 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3845 
3846 		ret_val = e1000_flash_cycle_ich8lan(hw,
3847 						ICH_FLASH_READ_COMMAND_TIMEOUT);
3848 
3849 		/* If FCERR is set, clear it and try the whole
3850 		 * sequence a few more times; otherwise read the
3851 		 * result out of Flash Data0, least significant
3852 		 * byte first.
3853 		 */
3854 		if (ret_val == E1000_SUCCESS) {
3855 			*data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3856 			break;
3857 		} else {
3858 			/* If we've gotten here, then things are probably
3859 			 * completely hosed, but if the error condition is
3860 			 * detected, it won't hurt to give it another try...
3861 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3862 			 */
3863 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3864 							      ICH_FLASH_HSFSTS);
3865 			if (hsfsts.hsf_status.flcerr) {
3866 				/* Repeat for some time before giving up. */
3867 				continue;
3868 			} else if (!hsfsts.hsf_status.flcdone) {
3869 				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3870 				break;
3871 			}
3872 		}
3873 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3874 
3875 	return ret_val;
3876 }
3877 
3878 
3879 /**
3880  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3881  *  @hw: pointer to the HW structure
3882  *  @offset: The offset (in words) of the word(s) to write.
3883  *  @words: Size of data to write in words
3884  *  @data: Pointer to the word(s) to write at offset.
3885  *
3886  *  Writes the word(s) to the shadow RAM, to be committed by update_nvm_checksum.
3887  **/
3888 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3889 				   u16 *data)
3890 {
3891 	struct e1000_nvm_info *nvm = &hw->nvm;
3892 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3893 	u16 i;
3894 
3895 	DEBUGFUNC("e1000_write_nvm_ich8lan");
3896 
3897 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3898 	    (words == 0)) {
3899 		DEBUGOUT("nvm parameter(s) out of bounds\n");
3900 		return -E1000_ERR_NVM;
3901 	}
3902 
3903 	nvm->ops.acquire(hw);
3904 
3905 	for (i = 0; i < words; i++) {
3906 		dev_spec->shadow_ram[offset+i].modified = TRUE;
3907 		dev_spec->shadow_ram[offset+i].value = data[i];
3908 	}
3909 
3910 	nvm->ops.release(hw);
3911 
3912 	return E1000_SUCCESS;
3913 }
3914 
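/* Minimal usage sketch (offset and value are hypothetical): a write
 * only lands in the shadow RAM above, so it must be paired with a
 * checksum update to actually reach the flash:
 *
 *	u16 val = 0x1234;
 *	hw->nvm.ops.write(hw, 0x10, 1, &val);	// cached in shadow_ram[]
 *	hw->nvm.ops.update(hw);			// erases and programs the spare bank
 */
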
3915 /**
3916  *  e1000_update_nvm_checksum_spt - Update the checksum for NVM
3917  *  @hw: pointer to the HW structure
3918  *
3919  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
3920  *  which writes the checksum to the shadow ram.  The changes in the shadow
3921  *  ram are then committed to the EEPROM by processing each bank at a time
3922  *  checking for the modified bit and writing only the pending changes.
3923  *  After a successful commit, the shadow ram is cleared and is ready for
3924  *  future writes.
3925  **/
3926 static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
3927 {
3928 	struct e1000_nvm_info *nvm = &hw->nvm;
3929 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3930 	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3931 	s32 ret_val;
3932 	u32 data32 = 0;
3933 
3934 	DEBUGFUNC("e1000_update_nvm_checksum_spt");
3935 
3936 	ret_val = e1000_update_nvm_checksum_generic(hw);
3937 	if (ret_val)
3938 		goto out;
3939 
3940 	if (nvm->type != e1000_nvm_flash_sw)
3941 		goto out;
3942 
3943 	nvm->ops.acquire(hw);
3944 
3945 	/* We're writing to the opposite bank so if we're on bank 1,
3946 	 * write to bank 0 etc.  We also need to erase the segment that
3947 	 * is going to be written
3948 	 */
3949 	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3950 	if (ret_val != E1000_SUCCESS) {
3951 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3952 		bank = 0;
3953 	}
3954 
3955 	if (bank == 0) {
3956 		new_bank_offset = nvm->flash_bank_size;
3957 		old_bank_offset = 0;
3958 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3959 		if (ret_val)
3960 			goto release;
3961 	} else {
3962 		old_bank_offset = nvm->flash_bank_size;
3963 		new_bank_offset = 0;
3964 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3965 		if (ret_val)
3966 			goto release;
3967 	}
3968 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i += 2) {
3969 		/* Determine whether to write the value stored
3970 		 * in the other NVM bank or a modified value stored
3971 		 * in the shadow RAM
3972 		 */
3973 		ret_val = e1000_read_flash_dword_ich8lan(hw,
3974 							 i + old_bank_offset,
3975 							 &data32);
3976 		if (dev_spec->shadow_ram[i].modified) {
3977 			data32 &= 0xFFFF0000;
3978 			data32 |= dev_spec->shadow_ram[i].value & 0xffff;
3979 		}
3980 		if (dev_spec->shadow_ram[i + 1].modified) {
3981 			data32 &= 0x0000FFFF;
3982 			data32 |= (dev_spec->shadow_ram[i + 1].value & 0xffff)
3983 				  << 16;
3984 		}
3985 		if (ret_val)
3986 			break;
3987 
3988 		/* If the word is 0x13, then make sure the signature bits
3989 		 * (15:14) are 11b until the commit has completed.
3990 		 * This will allow us to write 10b which indicates the
3991 		 * signature is valid.  We want to do this after the write
3992 		 * has completed so that we don't mark the segment valid
3993 		 * while the write is still in progress
3994 		 */
3995 		if (i == E1000_ICH_NVM_SIG_WORD - 1)
3996 			data32 |= E1000_ICH_NVM_SIG_MASK << 16;
3997 
3998 		/* Convert offset to bytes. */
3999 		/*act_offset = (i + new_bank_offset) << 1;*/
4000 
4001 		usec_delay(100);
4002 
4003 		/* Write the bytes to the new bank. */
4004 		act_offset = i + new_bank_offset;
4005 		ret_val = e1000_retry_write_flash_dword_ich8lan(hw,
4006 							       act_offset,
4007 							       data32);
4008 		if (ret_val)
4009 			break;
4010 	}
4011 
4012 	/* Don't bother writing the segment valid bits if sector
4013 	 * programming failed.
4014 	 */
4015 	if (ret_val) {
4016 		DEBUGOUT("Flash commit failed.\n");
4017 		goto release;
4018 	}
4019 
4020 	/* Finally validate the new segment by setting bits 15:14
4021 	 * to 10b in word 0x13.  This can be done without an
4022 	 * erase as well, since these bits are 11b to start with
4023 	 * and we only need to change bit 14 to 0b.
4024 	 */
4025 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4026 
4027 	/*offset in words but we read dword */
4028 	--act_offset;
4029 
4030 	ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &data32);
4031 	if (ret_val)
4032 		goto release;
4033 
4034 	data32 &= 0xBFFFFFFF;
4035 	ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset * 2 + 1,
4036 							data32);
4037 	if (ret_val)
4038 		goto release;
4039 
4040 	/* And invalidate the previously valid segment by setting
4041 	 * the high byte of its signature word (0x13) to 0.  This
4042 	 * can be done without an erase because a flash erase sets
4043 	 * all bits to 1's; we can write 1's to 0's without an erase.
4044 	 */
4045 	/*act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;*/
4046 
4047 	/* offset in words but we read dwords */
4048 	act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
4049 	ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &data32);
4050 
4051 	if (ret_val)
4052 		goto release;

	data32 &= 0x00FFFFFF;
	ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset * 2,
							data32);
	if (ret_val)
		goto release;
4053 
4054 	/* Great!  Everything worked, we can now clear the cached entries. */
4055 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4056 		dev_spec->shadow_ram[i].modified = FALSE;
4057 		dev_spec->shadow_ram[i].value = 0xFFFF;
4058 	}
4059 
4060 release:
4061 	nvm->ops.release(hw);
4062 
4063 	/* Reload the EEPROM, or else modifications will not appear
4064 	 * until after the next adapter reset.
4065 	 */
4066 	if (!ret_val) {
4067 		nvm->ops.reload(hw);
4068 		msec_delay(10);
4069 	}
4070 
4071 out:
4072 	if (ret_val)
4073 		DEBUGOUT1("NVM update error: %d\n", ret_val);
4074 
4075 	return ret_val;
4076 }
4077 
4078 /**
4079  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
4080  *  @hw: pointer to the HW structure
4081  *
4082  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
4083  *  which writes the checksum to the shadow ram.  The changes in the shadow
4084  *  ram are then committed to the EEPROM by processing each bank at a time
4085  *  checking for the modified bit and writing only the pending changes.
4086  *  After a successful commit, the shadow ram is cleared and is ready for
4087  *  future writes.
4088  **/
4089 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
4090 {
4091 	struct e1000_nvm_info *nvm = &hw->nvm;
4092 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4093 	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4094 	s32 ret_val;
4095 	u16 data = 0;
4096 
4097 	DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
4098 
4099 	ret_val = e1000_update_nvm_checksum_generic(hw);
4100 	if (ret_val)
4101 		goto out;
4102 
4103 	if (nvm->type != e1000_nvm_flash_sw)
4104 		goto out;
4105 
4106 	nvm->ops.acquire(hw);
4107 
4108 	/* We're writing to the opposite bank so if we're on bank 1,
4109 	 * write to bank 0 etc.  We also need to erase the segment that
4110 	 * is going to be written
4111 	 */
4112 	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4113 	if (ret_val != E1000_SUCCESS) {
4114 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4115 		bank = 0;
4116 	}
4117 
4118 	if (bank == 0) {
4119 		new_bank_offset = nvm->flash_bank_size;
4120 		old_bank_offset = 0;
4121 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4122 		if (ret_val)
4123 			goto release;
4124 	} else {
4125 		old_bank_offset = nvm->flash_bank_size;
4126 		new_bank_offset = 0;
4127 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4128 		if (ret_val)
4129 			goto release;
4130 	}
4131 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4132 		if (dev_spec->shadow_ram[i].modified) {
4133 			data = dev_spec->shadow_ram[i].value;
4134 		} else {
4135 			ret_val = e1000_read_flash_word_ich8lan(hw, i +
4136 								old_bank_offset,
4137 								&data);
4138 			if (ret_val)
4139 				break;
4140 		}
4141 		/* If the word is 0x13, then make sure the signature bits
4142 		 * (15:14) are 11b until the commit has completed.
4143 		 * This will allow us to write 10b which indicates the
4144 		 * signature is valid.  We want to do this after the write
4145 		 * has completed so that we don't mark the segment valid
4146 		 * while the write is still in progress
4147 		 */
4148 		if (i == E1000_ICH_NVM_SIG_WORD)
4149 			data |= E1000_ICH_NVM_SIG_MASK;
4150 
4151 		/* Convert offset to bytes. */
4152 		act_offset = (i + new_bank_offset) << 1;
4153 
4154 		usec_delay(100);
4155 
4156 		/* Write the bytes to the new bank. */
4157 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4158 							       act_offset,
4159 							       (u8)data);
4160 		if (ret_val)
4161 			break;
4162 
4163 		usec_delay(100);
4164 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4165 							  act_offset + 1,
4166 							  (u8)(data >> 8));
4167 		if (ret_val)
4168 			break;
4169 	}
4170 
4171 	/* Don't bother writing the segment valid bits if sector
4172 	 * programming failed.
4173 	 */
4174 	if (ret_val) {
4175 		DEBUGOUT("Flash commit failed.\n");
4176 		goto release;
4177 	}
4178 
4179 	/* Finally validate the new segment by setting bits 15:14
4180 	 * to 10b in word 0x13.  This can be done without an
4181 	 * erase as well, since these bits are 11b to start with
4182 	 * and we only need to change bit 14 to 0b.
4183 	 */
4184 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4185 	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
4186 	if (ret_val)
4187 		goto release;
4188 
4189 	data &= 0xBFFF;
4190 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1,
4191 						       (u8)(data >> 8));
4192 	if (ret_val)
4193 		goto release;
4194 
4195 	/* And invalidate the previously valid segment by setting
4196 	 * the high byte of its signature word (0x13) to 0.  This
4197 	 * can be done without an erase because a flash erase sets
4198 	 * all bits to 1's; we can write 1's to 0's without an erase.
4199 	 */
4200 	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4201 
4202 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
4203 
4204 	if (ret_val)
4205 		goto release;
4206 
4207 	/* Great!  Everything worked, we can now clear the cached entries. */
4208 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4209 		dev_spec->shadow_ram[i].modified = FALSE;
4210 		dev_spec->shadow_ram[i].value = 0xFFFF;
4211 	}
4212 
4213 release:
4214 	nvm->ops.release(hw);
4215 
4216 	/* Reload the EEPROM, or else modifications will not appear
4217 	 * until after the next adapter reset.
4218 	 */
4219 	if (!ret_val) {
4220 		nvm->ops.reload(hw);
4221 		msec_delay(10);
4222 	}
4223 
4224 out:
4225 	if (ret_val)
4226 		DEBUGOUT1("NVM update error: %d\n", ret_val);
4227 
4228 	return ret_val;
4229 }
4230 
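/* The signature handoff performed above, in numbers: bits 15:14 of
 * word 0x13 are held at 11b while the new bank is being written, the
 * "data &= 0xBFFF" write then clears bit 14 to give the valid 10b
 * pattern, and finally the old bank's high byte is zeroed:
 *
 *	new bank: 11b (commit in progress) -> 10b (valid)
 *	old bank: 10b (valid)              -> 00b (invalid)
 */
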
4231 /**
4232  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
4233  *  @hw: pointer to the HW structure
4234  *
4235  *  Check to see if the checksum needs to be fixed by reading bit 6 in word 0x19.
4236  *  If the bit is 0, the EEPROM has been modified but the checksum was not
4237  *  recalculated, in which case we need to calculate the checksum and set bit 6.
4238  **/
4239 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4240 {
4241 	s32 ret_val;
4242 	u16 data;
4243 	u16 word;
4244 	u16 valid_csum_mask;
4245 
4246 	DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
4247 
4248 	/* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
4249 	 * the checksum needs to be fixed.  This bit is an indication that
4250 	 * the NVM was prepared by OEM software and did not calculate
4251 	 * the checksum...a likely scenario.
4252 	 */
4253 	switch (hw->mac.type) {
4254 	case e1000_pch_lpt:
4255 	case e1000_pch_spt:
4256 		word = NVM_COMPAT;
4257 		valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4258 		break;
4259 	default:
4260 		word = NVM_FUTURE_INIT_WORD1;
4261 		valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4262 		break;
4263 	}
4264 
4265 	ret_val = hw->nvm.ops.read(hw, word, 1, &data);
4266 	if (ret_val)
4267 		return ret_val;
4268 
4269 	if (!(data & valid_csum_mask)) {
4270 		data |= valid_csum_mask;
4271 		ret_val = hw->nvm.ops.write(hw, word, 1, &data);
4272 		if (ret_val)
4273 			return ret_val;
4274 		ret_val = hw->nvm.ops.update(hw);
4275 		if (ret_val)
4276 			return ret_val;
4277 	}
4278 
4279 	return e1000_validate_nvm_checksum_generic(hw);
4280 }
4281 
4282 /**
4283  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4284  *  @hw: pointer to the HW structure
4285  *  @offset: The offset (in bytes) of the byte/word to write.
4286  *  @size: Size of data to write, 1=byte 2=word
4287  *  @data: The byte(s) to write to the NVM.
4288  *
4289  *  Writes one or two bytes to the NVM using the flash access registers.
4290  **/
4291 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4292 					  u8 size, u16 data)
4293 {
4294 	union ich8_hws_flash_status hsfsts;
4295 	union ich8_hws_flash_ctrl hsflctl;
4296 	u32 flash_linear_addr;
4297 	u32 flash_data = 0;
4298 	s32 ret_val;
4299 	u8 count = 0;
4300 
4301 	DEBUGFUNC("e1000_write_flash_data_ich8lan");
4302 
4303 	if (hw->mac.type == e1000_pch_spt) {
4304 		if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4305 			return -E1000_ERR_NVM;
4306 	} else {
4307 		if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4308 			return -E1000_ERR_NVM;
4309 	}
4310 
4311 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4312 			     hw->nvm.flash_base_addr);
4313 
4314 	do {
4315 		usec_delay(1);
4316 		/* Steps */
4317 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
4318 		if (ret_val != E1000_SUCCESS)
4319 			break;
4320 		if (hw->mac.type == e1000_pch_spt)
4321 			hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS) >> 16;
4322 		else
4323 			hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4324 
4325 		/* fldbcount is byte count - 1: 0b=1 byte, 1b=2 bytes, 11b=4 bytes. */
4326 		hsflctl.hsf_ctrl.fldbcount = size - 1;
4327 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4328 		if (hw->mac.type == e1000_pch_spt)
4329 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, hsflctl.regval << 16);
4330 		else
4331 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
4332 
4333 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4334 
4335 		if (size == 1)
4336 			flash_data = (u32)data & 0x00FF;
4337 		else
4338 			flash_data = (u32)data;
4339 
4340 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
4341 
4342 		/* If FCERR is set, clear it and try the whole sequence
4343 		 * a few more times; otherwise we are done.
4344 		 */
4345 		ret_val =
4346 		    e1000_flash_cycle_ich8lan(hw,
4347 					      ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4348 		if (ret_val == E1000_SUCCESS)
4349 			break;
4350 
4351 		/* If we're here, then things are most likely
4352 		 * completely hosed, but if the error condition
4353 		 * is detected, it won't hurt to give it another
4354 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4355 		 */
4356 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4357 		if (hsfsts.hsf_status.flcerr)
4358 			/* Repeat for some time before giving up. */
4359 			continue;
4360 		if (!hsfsts.hsf_status.flcdone) {
4361 			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4362 			break;
4363 		}
4364 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4365 
4366 	return ret_val;
4367 }
4368 
4369 /**
4370  *  e1000_write_flash_data32_ich8lan - Writes 32-bit words to the NVM
4371  *  @hw: pointer to the HW structure
4372  *  @offset: The offset (in bytes) of the 32-bit word to write.
4373  *  @data: The dword to write to the NVM.
4374  *
4375  *  Writes a single 32-bit word to the NVM using the flash access registers.
4376  **/
4377 static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4378 					    u32 data)
4379 {
4380 	union ich8_hws_flash_status hsfsts;
4381 	union ich8_hws_flash_ctrl hsflctl;
4382 	u32 flash_linear_addr;
4383 	s32 ret_val;
4384 	u8 count = 0;
4385 
4386 	DEBUGFUNC("e1000_write_flash_data32_ich8lan");
4387 
4388 	if (hw->mac.type == e1000_pch_spt) {
4389 		if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
4390 			return -E1000_ERR_NVM;
4391 	}
4392 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4393 			     hw->nvm.flash_base_addr);
4394 
4395 	do {
4396 		usec_delay(1);
4397 		/* Steps */
4398 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
4399 		if (ret_val != E1000_SUCCESS)
4400 			break;
4401 		if (hw->mac.type == e1000_pch_spt) {
4402 			hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS) >> 16;
4403 		} else {
4404 			hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4405 		}
4406 
4407 		/* fldbcount is byte count - 1, so 3 selects a 4-byte (dword) transfer. */
4408 		hsflctl.hsf_ctrl.fldbcount = sizeof(int32_t) - 1;
4409 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4410 
4411 		/* In SPT, this register is in the LAN memory space,
4412 		 * not flash.  Therefore, only 32-bit access is
4413 		 * supported.
4414 		 */
4415 		if (hw->mac.type == e1000_pch_spt) {
4416 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, hsflctl.regval << 16);
4417 		} else {
4418 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
4419 		}
4420 
4421 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4422 
4423 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);
4424 
4425 		/* If FCERR is set, clear it and try the whole sequence
4426 		 * a few more times; otherwise we are done.
4427 		 */
4428 		ret_val =
4429 		    e1000_flash_cycle_ich8lan(hw,
4430 					      ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4431 		if (ret_val == E1000_SUCCESS)
4432 			break;
4433 
4434 		/* If we're here, then things are most likely
4435 		 * completely hosed, but if the error condition
4436 		 * is detected, it won't hurt to give it another
4437 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4438 		 */
4439 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4440 		if (hsfsts.hsf_status.flcerr)
4441 			/* Repeat for some time before giving up. */
4442 			continue;
4443 		if (!hsfsts.hsf_status.flcdone) {
4444 			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4445 			break;
4446 		}
4447 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4448 
4449 	return ret_val;
4450 }
4451 
4452 
4453 /**
4454  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
4455  *  @hw: pointer to the HW structure
4456  *  @offset: The offset of the byte to write.
4457  *  @data: The byte to write to the NVM.
4458  *
4459  *  Writes a single byte to the NVM using the flash access registers.
4460  **/
4461 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
4462 					  u8 data)
4463 {
4464 	u16 word = (u16)data;
4465 
4466 	DEBUGFUNC("e1000_write_flash_byte_ich8lan");
4467 
4468 	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4469 }
4470 
4471 /**
4472  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
4473  *  @hw: pointer to the HW structure
4474  *  @offset: The offset of the byte to write.
4475  *  @byte: The byte to write to the NVM.
4476  *
4477  *  Writes a single byte to the NVM using the flash access registers.
4478  *  Goes through a retry algorithm before giving up.
4479  **/
4480 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
4481 						u32 offset, u8 byte)
4482 {
4483 	s32 ret_val;
4484 	u16 program_retries;
4485 
4486 	DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
4487 
4488 	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4489 	if (!ret_val)
4490 		return ret_val;
4491 
4492 	for (program_retries = 0; program_retries < 100; program_retries++) {
4493 		DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
4494 		usec_delay(100);
4495 		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4496 		if (ret_val == E1000_SUCCESS)
4497 			break;
4498 	}
4499 	if (program_retries == 100)
4500 		return -E1000_ERR_NVM;
4501 
4502 	return E1000_SUCCESS;
4503 }
4504 
4505 /**
4506  *  e1000_retry_write_flash_dword_ich8lan - Writes a 32-bit word to NVM
4507  *  @hw: pointer to the HW structure
4508  *  @offset: The offset of the dword to write.
4509  *  @dword: The dword to write to the NVM.
4510  *
4511  *  Writes a single 32-bit word to the NVM using the flash access registers.
4512  *  Goes through a retry algorithm before giving up.
4513  **/
4514 static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
4515 						u32 offset, u32 dword)
4516 {
4517 	s32 ret_val;
4518 	u16 program_retries;
4519 
4520 	DEBUGFUNC("e1000_retry_write_flash_dword_ich8lan");
4521 
4522 	ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4523 	if (!ret_val)
4524 		return ret_val;
4525 
4526 	for (program_retries = 0; program_retries < 100; program_retries++) {
4527 		DEBUGOUT2("Retrying DWord %08X at offset %u\n", dword, offset);
4528 		usec_delay(100);
4529 		ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4530 		if (ret_val == E1000_SUCCESS)
4531 			break;
4532 	}
4533 	if (program_retries == 100)
4534 		return -E1000_ERR_NVM;
4535 
4536 	return E1000_SUCCESS;
4537 }
4538 
4539 /**
4540  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
4541  *  @hw: pointer to the HW structure
4542  *  @bank: 0 for first bank, 1 for second bank, etc.
4543  *
4544  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
4545  *  bank N is 4096 * N + flash_reg_addr.
4546  **/
4547 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
4548 {
4549 	struct e1000_nvm_info *nvm = &hw->nvm;
4550 	union ich8_hws_flash_status hsfsts;
4551 	union ich8_hws_flash_ctrl hsflctl;
4552 	u32 flash_linear_addr;
4553 	/* bank size is in 16bit words - adjust to bytes */
4554 	u32 flash_bank_size = nvm->flash_bank_size * 2;
4555 	s32 ret_val;
4556 	s32 count = 0;
4557 	s32 j, iteration, sector_size;
4558 
4559 	DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
4560 
4561 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4562 
4563 	/* Determine HW Sector size: Read BERASE bits of hw flash status
4564 	 * register
4565 	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
4566 	 *     consecutive sectors.  The start index for the nth Hw sector
4567 	 *     can be calculated as = bank * 4096 + n * 256
4568 	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
4569 	 *     The start index for the nth Hw sector can be calculated
4570 	 *     as = bank * 4096
4571 	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
4572 	 *     (ich9 only, otherwise error condition)
4573 	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
4574 	 */
4575 	switch (hsfsts.hsf_status.berasesz) {
4576 	case 0:
4577 		/* Hw sector size 256 */
4578 		sector_size = ICH_FLASH_SEG_SIZE_256;
4579 		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4580 		break;
4581 	case 1:
4582 		sector_size = ICH_FLASH_SEG_SIZE_4K;
4583 		iteration = 1;
4584 		break;
4585 	case 2:
4586 		sector_size = ICH_FLASH_SEG_SIZE_8K;
4587 		iteration = 1;
4588 		break;
4589 	case 3:
4590 		sector_size = ICH_FLASH_SEG_SIZE_64K;
4591 		iteration = 1;
4592 		break;
4593 	default:
4594 		return -E1000_ERR_NVM;
4595 	}
4596 
4597 	/* Start with the base address, then add the sector offset. */
4598 	flash_linear_addr = hw->nvm.flash_base_addr;
4599 	flash_linear_addr += (bank) ? flash_bank_size : 0;
4600 
4601 	for (j = 0; j < iteration; j++) {
4602 		do {
4603 			u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4604 
4605 			/* Steps */
4606 			ret_val = e1000_flash_cycle_init_ich8lan(hw);
4607 			if (ret_val)
4608 				return ret_val;
4609 
4610 			/* Write a value 11 (block Erase) in Flash
4611 			 * Cycle field in hw flash control
4612 			 */
4613 			if (hw->mac.type == e1000_pch_spt)
4614 				hsflctl.regval =
4615 				    E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS) >> 16;
4616 			else
4617 				hsflctl.regval =
4618 				    E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4619 
4620 			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4621 			if (hw->mac.type == e1000_pch_spt)
4622 				E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4623 							hsflctl.regval << 16);
4624 			else
4625 				E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4626 							hsflctl.regval);
4627 
4628 			/* Write the last 24 bits of an index within the
4629 			 * block into Flash Linear address field in Flash
4630 			 * Address.
4631 			 */
4632 			flash_linear_addr += (j * sector_size);
4633 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
4634 					      flash_linear_addr);
4635 
4636 			ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4637 			if (ret_val == E1000_SUCCESS)
4638 				break;
4639 
4640 			/* Check if FCERR is set to 1.  If 1,
4641 			 * clear it and try the whole sequence
4642 			 * a few more times else Done
4643 			 */
4644 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4645 						      ICH_FLASH_HSFSTS);
4646 			if (hsfsts.hsf_status.flcerr)
4647 				/* repeat for some time before giving up */
4648 				continue;
4649 			else if (!hsfsts.hsf_status.flcdone)
4650 				return ret_val;
4651 		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4652 	}
4653 
4654 	return E1000_SUCCESS;
4655 }
4656 
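/* Worked example for the erase loop above, assuming BERASE = 00b
 * (256-byte sectors) and a 4 KB bank: iteration = 4096 / 256 = 16
 * erase cycles are issued, with hardware sector n starting at
 *
 *	flash_base_addr + bank * 4096 + n * 256
 */
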
4657 /**
4658  *  e1000_valid_led_default_ich8lan - Set the default LED settings
4659  *  @hw: pointer to the HW structure
4660  *  @data: Pointer to the LED settings
4661  *
4662  *  Reads the LED default settings from the NVM into data.  If the NVM LED
4663  *  settings word is all 0's or all F's, set the LED default to a valid LED
4664  *  default setting.
4665  **/
4666 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4667 {
4668 	s32 ret_val;
4669 
4670 	DEBUGFUNC("e1000_valid_led_default_ich8lan");
4671 
4672 	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4673 	if (ret_val) {
4674 		DEBUGOUT("NVM Read Error\n");
4675 		return ret_val;
4676 	}
4677 
4678 	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4679 		*data = ID_LED_DEFAULT_ICH8LAN;
4680 
4681 	return E1000_SUCCESS;
4682 }
4683 
4684 /**
4685  *  e1000_id_led_init_pchlan - store LED configurations
4686  *  @hw: pointer to the HW structure
4687  *
4688  *  PCH does not control LEDs via the LEDCTL register, rather it uses
4689  *  the PHY LED configuration register.
4690  *
4691  *  PCH also does not have an "always on" or "always off" mode which
4692  *  complicates the ID feature.  Instead of using the "on" mode to indicate
4693  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4694  *  use "link_up" mode.  The LEDs will still ID on request if there is no
4695  *  link based on logic in e1000_led_[on|off]_pchlan().
4696  **/
4697 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4698 {
4699 	struct e1000_mac_info *mac = &hw->mac;
4700 	s32 ret_val;
4701 	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4702 	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4703 	u16 data, i, temp, shift;
4704 
4705 	DEBUGFUNC("e1000_id_led_init_pchlan");
4706 
4707 	/* Get default ID LED modes */
4708 	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4709 	if (ret_val)
4710 		return ret_val;
4711 
4712 	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4713 	mac->ledctl_mode1 = mac->ledctl_default;
4714 	mac->ledctl_mode2 = mac->ledctl_default;
4715 
4716 	for (i = 0; i < 4; i++) {
4717 		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4718 		shift = (i * 5);
4719 		switch (temp) {
4720 		case ID_LED_ON1_DEF2:
4721 		case ID_LED_ON1_ON2:
4722 		case ID_LED_ON1_OFF2:
4723 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4724 			mac->ledctl_mode1 |= (ledctl_on << shift);
4725 			break;
4726 		case ID_LED_OFF1_DEF2:
4727 		case ID_LED_OFF1_ON2:
4728 		case ID_LED_OFF1_OFF2:
4729 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4730 			mac->ledctl_mode1 |= (ledctl_off << shift);
4731 			break;
4732 		default:
4733 			/* Do nothing */
4734 			break;
4735 		}
4736 		switch (temp) {
4737 		case ID_LED_DEF1_ON2:
4738 		case ID_LED_ON1_ON2:
4739 		case ID_LED_OFF1_ON2:
4740 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4741 			mac->ledctl_mode2 |= (ledctl_on << shift);
4742 			break;
4743 		case ID_LED_DEF1_OFF2:
4744 		case ID_LED_ON1_OFF2:
4745 		case ID_LED_OFF1_OFF2:
4746 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4747 			mac->ledctl_mode2 |= (ledctl_off << shift);
4748 			break;
4749 		default:
4750 			/* Do nothing */
4751 			break;
4752 		}
4753 	}
4754 
4755 	return E1000_SUCCESS;
4756 }
4757 
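/* A worked decode of the loop above (the NVM word is hypothetical):
 * each LED takes a 4-bit mode from the ID LED word and a 5-bit field
 * in the PHY LED control value:
 *
 *	data = 0x0861;
 *	LED0: (data >> 0)  & 0xF = 0x1, shift = 0
 *	LED1: (data >> 4)  & 0xF = 0x6, shift = 5
 *	LED2: (data >> 8)  & 0xF = 0x8, shift = 10
 *	LED3: (data >> 12) & 0xF = 0x0, shift = 15
 */
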
4758 /**
4759  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4760  *  @hw: pointer to the HW structure
4761  *
4762  *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
4763  *  register, so the bus width is hard coded.
4764  **/
4765 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4766 {
4767 	struct e1000_bus_info *bus = &hw->bus;
4768 	s32 ret_val;
4769 
4770 	DEBUGFUNC("e1000_get_bus_info_ich8lan");
4771 
4772 	ret_val = e1000_get_bus_info_pcie_generic(hw);
4773 
4774 	/* ICH devices are "PCI Express"-ish.  They have
4775 	 * a configuration space, but do not contain
4776 	 * PCI Express Capability registers, so bus width
4777 	 * must be hardcoded.
4778 	 */
4779 	if (bus->width == e1000_bus_width_unknown)
4780 		bus->width = e1000_bus_width_pcie_x1;
4781 
4782 	return ret_val;
4783 }
4784 
4785 /**
4786  *  e1000_reset_hw_ich8lan - Reset the hardware
4787  *  @hw: pointer to the HW structure
4788  *
4789  *  Does a full reset of the hardware which includes a reset of the PHY and
4790  *  MAC.
4791  **/
4792 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4793 {
4794 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4795 	u16 kum_cfg;
4796 	u32 ctrl, reg;
4797 	s32 ret_val;
4798 
4799 	DEBUGFUNC("e1000_reset_hw_ich8lan");
4800 
4801 	/* Prevent the PCI-E bus from sticking if there is no TLP connection
4802 	 * on the last TLP read/write transaction when MAC is reset.
4803 	 */
4804 	ret_val = e1000_disable_pcie_master_generic(hw);
4805 	if (ret_val)
4806 		DEBUGOUT("PCI-E Master disable polling has failed.\n");
4807 
4808 	DEBUGOUT("Masking off all interrupts\n");
4809 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4810 
4811 	/* Disable the Transmit and Receive units.  Then delay to allow
4812 	 * any pending transactions to complete before we hit the MAC
4813 	 * with the global reset.
4814 	 */
4815 	E1000_WRITE_REG(hw, E1000_RCTL, 0);
4816 	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
4817 	E1000_WRITE_FLUSH(hw);
4818 
4819 	msec_delay(10);
4820 
4821 	/* Workaround for ICH8 bit corruption issue in FIFO memory */
4822 	if (hw->mac.type == e1000_ich8lan) {
4823 		/* Set Tx and Rx buffer allocation to 8k apiece. */
4824 		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
4825 		/* Set Packet Buffer Size to 16k. */
4826 		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
4827 	}
4828 
4829 	if (hw->mac.type == e1000_pchlan) {
4830 		/* Save the NVM K1 bit setting */
4831 		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4832 		if (ret_val)
4833 			return ret_val;
4834 
4835 		if (kum_cfg & E1000_NVM_K1_ENABLE)
4836 			dev_spec->nvm_k1_enabled = TRUE;
4837 		else
4838 			dev_spec->nvm_k1_enabled = FALSE;
4839 	}
4840 
4841 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
4842 
4843 	if (!hw->phy.ops.check_reset_block(hw)) {
4844 		/* Full-chip reset requires MAC and PHY reset at the same
4845 		 * time to make sure the interface between MAC and the
4846 		 * external PHY is reset.
4847 		 */
4848 		ctrl |= E1000_CTRL_PHY_RST;
4849 
4850 		/* Gate automatic PHY configuration by hardware on
4851 		 * non-managed 82579
4852 		 */
4853 		if ((hw->mac.type == e1000_pch2lan) &&
4854 		    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
4855 			e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
4856 	}
4857 	ret_val = e1000_acquire_swflag_ich8lan(hw);
4858 	DEBUGOUT("Issuing a global reset to ich8lan\n");
4859 	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
4860 	/* cannot issue a flush here because it hangs the hardware */
4861 	msec_delay(20);
4862 
4863 	/* Set Phy Config Counter to 50msec */
4864 	if (hw->mac.type == e1000_pch2lan) {
4865 		reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
4866 		reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
4867 		reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
4868 		E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
4869 	}
4870 
4871 	if (ctrl & E1000_CTRL_PHY_RST) {
4872 		ret_val = hw->phy.ops.get_cfg_done(hw);
4873 		if (ret_val)
4874 			return ret_val;
4875 
4876 		ret_val = e1000_post_phy_reset_ich8lan(hw);
4877 		if (ret_val)
4878 			return ret_val;
4879 	}
4880 
4881 	/* For PCH, this write will make sure that any noise
4882 	 * will be detected as a CRC error and be dropped rather than show up
4883 	 * as a bad packet to the DMA engine.
4884 	 */
4885 	if (hw->mac.type == e1000_pchlan)
4886 		E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
4887 
4888 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4889 	E1000_READ_REG(hw, E1000_ICR);
4890 
4891 	reg = E1000_READ_REG(hw, E1000_KABGTXD);
4892 	reg |= E1000_KABGTXD_BGSQLBIAS;
4893 	E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
4894 
4895 	return E1000_SUCCESS;
4896 }
4897 
4898 /**
4899  *  e1000_init_hw_ich8lan - Initialize the hardware
4900  *  @hw: pointer to the HW structure
4901  *
4902  *  Prepares the hardware for transmit and receive by doing the following:
4903  *   - initialize hardware bits
4904  *   - initialize LED identification
4905  *   - setup receive address registers
4906  *   - setup flow control
4907  *   - setup transmit descriptors
4908  *   - clear statistics
4909  **/
4910 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
4911 {
4912 	struct e1000_mac_info *mac = &hw->mac;
4913 	u32 ctrl_ext, txdctl, snoop;
4914 	s32 ret_val;
4915 	u16 i;
4916 
4917 	DEBUGFUNC("e1000_init_hw_ich8lan");
4918 
4919 	e1000_initialize_hw_bits_ich8lan(hw);
4920 
4921 	/* Initialize identification LED */
4922 	ret_val = mac->ops.id_led_init(hw);
4923 	/* An error is not fatal and we should not stop init due to this */
4924 	if (ret_val)
4925 		DEBUGOUT("Error initializing identification LED\n");
4926 
4927 	/* Setup the receive address. */
4928 	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
4929 
4930 	/* Zero out the Multicast HASH table */
4931 	DEBUGOUT("Zeroing the MTA\n");
4932 	for (i = 0; i < mac->mta_reg_count; i++)
4933 		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
4934 
4935 	/* The 82578 Rx buffer will stall if wakeup is enabled in host and
4936 	 * the ME.  Disable wakeup by clearing the host wakeup bit.
4937 	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
4938 	 */
4939 	if (hw->phy.type == e1000_phy_82578) {
4940 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
4941 		i &= ~BM_WUC_HOST_WU_BIT;
4942 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
4943 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
4944 		if (ret_val)
4945 			return ret_val;
4946 	}
4947 
4948 	/* Setup link and flow control */
4949 	ret_val = mac->ops.setup_link(hw);
4950 
4951 	/* Set the transmit descriptor write-back policy for both queues */
4952 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
4953 	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4954 		  E1000_TXDCTL_FULL_TX_DESC_WB);
4955 	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4956 		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4957 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
4958 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
4959 	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4960 		  E1000_TXDCTL_FULL_TX_DESC_WB);
4961 	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4962 		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4963 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
4964 
4965 	/* ICH8 has opposite polarity of no_snoop bits.
4966 	 * By default, we should use snoop behavior.
4967 	 */
4968 	if (mac->type == e1000_ich8lan)
4969 		snoop = PCIE_ICH8_SNOOP_ALL;
4970 	else
4971 		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
4972 	e1000_set_pcie_no_snoop_generic(hw, snoop);
4973 
4974 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
4975 	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
4976 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
4977 
4978 	/* Clear all of the statistics registers (clear on read).  It is
4979 	 * important that we do this after we have tried to establish link
4980 	 * because the symbol error count will increment wildly if there
4981 	 * is no link.
4982 	 */
4983 	e1000_clear_hw_cntrs_ich8lan(hw);
4984 
4985 	return ret_val;
4986 }
4987 
4988 /**
4989  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
4990  *  @hw: pointer to the HW structure
4991  *
4992  *  Sets/Clears required hardware bits necessary for correctly setting up the
4993  *  hardware for transmit and receive.
4994  **/
4995 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
4996 {
4997 	u32 reg;
4998 
4999 	DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
5000 
5001 	/* Extended Device Control */
5002 	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
5003 	reg |= (1 << 22);
5004 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
5005 	if (hw->mac.type >= e1000_pchlan)
5006 		reg |= E1000_CTRL_EXT_PHYPDEN;
5007 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
5008 
5009 	/* Transmit Descriptor Control 0 */
5010 	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
5011 	reg |= (1 << 22);
5012 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
5013 
5014 	/* Transmit Descriptor Control 1 */
5015 	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
5016 	reg |= (1 << 22);
5017 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
5018 
5019 	/* Transmit Arbitration Control 0 */
5020 	reg = E1000_READ_REG(hw, E1000_TARC(0));
5021 	if (hw->mac.type == e1000_ich8lan)
5022 		reg |= (1 << 28) | (1 << 29);
5023 	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
5024 	E1000_WRITE_REG(hw, E1000_TARC(0), reg);
5025 
5026 	/* Transmit Arbitration Control 1 */
5027 	reg = E1000_READ_REG(hw, E1000_TARC(1));
5028 	if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
5029 		reg &= ~(1 << 28);
5030 	else
5031 		reg |= (1 << 28);
5032 	reg |= (1 << 24) | (1 << 26) | (1 << 30);
5033 	E1000_WRITE_REG(hw, E1000_TARC(1), reg);
5034 
5035 	/* Device Status */
5036 	if (hw->mac.type == e1000_ich8lan) {
5037 		reg = E1000_READ_REG(hw, E1000_STATUS);
5038 		reg &= ~(1 << 31);
5039 		E1000_WRITE_REG(hw, E1000_STATUS, reg);
5040 	}
5041 
5042 	/* Work around a descriptor data corruption issue during NFSv2 UDP
5043 	 * traffic by simply disabling the NFS filtering capability.
5044 	 */
5045 	reg = E1000_READ_REG(hw, E1000_RFCTL);
5046 	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
5047 
5048 	/* Disable IPv6 extension header parsing because some malformed
5049 	 * IPv6 headers can hang the Rx.
5050 	 */
5051 	if (hw->mac.type == e1000_ich8lan)
5052 		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
5053 	E1000_WRITE_REG(hw, E1000_RFCTL, reg);
5054 
5055 	/* Enable ECC on Lynxpoint and Sunrise Point */
5056 	if (hw->mac.type == e1000_pch_lpt ||
5057 	    hw->mac.type == e1000_pch_spt) {
5058 		reg = E1000_READ_REG(hw, E1000_PBECCSTS);
5059 		reg |= E1000_PBECCSTS_ECC_ENABLE;
5060 		E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
5061 
5062 		reg = E1000_READ_REG(hw, E1000_CTRL);
5063 		reg |= E1000_CTRL_MEHE;
5064 		E1000_WRITE_REG(hw, E1000_CTRL, reg);
5065 	}
5066 
5067 	return;
5068 }
5069 
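/*
 * Every block in the function above follows the same read-modify-write
 * idiom on a MAC register.  The idiom in isolation, as a hedged sketch
 * (the helper name is illustrative and not part of this driver):
 *
 *	static inline void
 *	e1000_reg_set_bits(struct e1000_hw *hw, u32 offset, u32 bits)
 *	{
 *		E1000_WRITE_REG(hw, offset,
 *				E1000_READ_REG(hw, offset) | bits);
 *	}
 */
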
5070 /**
5071  *  e1000_setup_link_ich8lan - Setup flow control and link settings
5072  *  @hw: pointer to the HW structure
5073  *
5074  *  Determines which flow control settings to use, then configures flow
5075  *  control.  Calls the appropriate media-specific link configuration
5076  *  function.  Assuming the adapter has a valid link partner, a valid link
5077  *  should be established.  Assumes the hardware has previously been reset
5078  *  and the transmitter and receiver are not enabled.
5079  **/
5080 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
5081 {
5082 	s32 ret_val;
5083 
5084 	DEBUGFUNC("e1000_setup_link_ich8lan");
5085 
5086 	if (hw->phy.ops.check_reset_block(hw))
5087 		return E1000_SUCCESS;
5088 
5089 	/* ICH parts do not have a word in the NVM to determine
5090 	 * the default flow control setting, so we explicitly
5091 	 * set it to full.
5092 	 */
5093 	if (hw->fc.requested_mode == e1000_fc_default)
5094 		hw->fc.requested_mode = e1000_fc_full;
5095 
5096 	/* Save off the requested flow control mode for use later.  Depending
5097 	 * on the link partner's capabilities, we may or may not use this mode.
5098 	 */
5099 	hw->fc.current_mode = hw->fc.requested_mode;
5100 
5101 	DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
5102 		hw->fc.current_mode);
5103 
5104 	/* Continue to configure the copper link. */
5105 	ret_val = hw->mac.ops.setup_physical_interface(hw);
5106 	if (ret_val)
5107 		return ret_val;
5108 
5109 	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
5110 	if ((hw->phy.type == e1000_phy_82578) ||
5111 	    (hw->phy.type == e1000_phy_82579) ||
5112 	    (hw->phy.type == e1000_phy_i217) ||
5113 	    (hw->phy.type == e1000_phy_82577)) {
5114 		E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
5115 
5116 		ret_val = hw->phy.ops.write_reg(hw,
5117 					     PHY_REG(BM_PORT_CTRL_PAGE, 27),
5118 					     hw->fc.pause_time);
5119 		if (ret_val)
5120 			return ret_val;
5121 	}
5122 
5123 	return e1000_set_fc_watermarks_generic(hw);
5124 }
5125 
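/*
 * A caller that wants something other than full flow control sets
 * hw->fc.requested_mode before link setup runs.  Sketch (driver
 * context assumed; e1000_fc_rx_pause is one of the standard
 * enum e1000_fc_mode values):
 *
 *	hw->fc.requested_mode = e1000_fc_rx_pause;
 *	ret_val = hw->mac.ops.setup_link(hw);
 */
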
5126 /**
5127  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
5128  *  @hw: pointer to the HW structure
5129  *
5130  *  Configures the Kumeran interface to the PHY to wait the appropriate time
5131  *  when polling the PHY, then calls the generic setup_copper_link to finish
5132  *  configuring the copper link.
5133  **/
5134 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
5135 {
5136 	u32 ctrl;
5137 	s32 ret_val;
5138 	u16 reg_data;
5139 
5140 	DEBUGFUNC("e1000_setup_copper_link_ich8lan");
5141 
5142 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5143 	ctrl |= E1000_CTRL_SLU;
5144 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5145 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5146 
5147 	/* Set the mac to wait the maximum time between each iteration
5148 	 * and increase the max iterations when polling the phy;
5149 	 * this fixes erroneous timeouts at 10Mbps.
5150 	 */
5151 	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
5152 					       0xFFFF);
5153 	if (ret_val)
5154 		return ret_val;
5155 	ret_val = e1000_read_kmrn_reg_generic(hw,
5156 					      E1000_KMRNCTRLSTA_INBAND_PARAM,
5157 					      &reg_data);
5158 	if (ret_val)
5159 		return ret_val;
5160 	reg_data |= 0x3F;
5161 	ret_val = e1000_write_kmrn_reg_generic(hw,
5162 					       E1000_KMRNCTRLSTA_INBAND_PARAM,
5163 					       reg_data);
5164 	if (ret_val)
5165 		return ret_val;
5166 
5167 	switch (hw->phy.type) {
5168 	case e1000_phy_igp_3:
5169 		ret_val = e1000_copper_link_setup_igp(hw);
5170 		if (ret_val)
5171 			return ret_val;
5172 		break;
5173 	case e1000_phy_bm:
5174 	case e1000_phy_82578:
5175 		ret_val = e1000_copper_link_setup_m88(hw);
5176 		if (ret_val)
5177 			return ret_val;
5178 		break;
5179 	case e1000_phy_82577:
5180 	case e1000_phy_82579:
5181 		ret_val = e1000_copper_link_setup_82577(hw);
5182 		if (ret_val)
5183 			return ret_val;
5184 		break;
5185 	case e1000_phy_ife:
5186 		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
5187 					       &reg_data);
5188 		if (ret_val)
5189 			return ret_val;
5190 
5191 		reg_data &= ~IFE_PMC_AUTO_MDIX;
5192 
5193 		switch (hw->phy.mdix) {
5194 		case 1:
5195 			reg_data &= ~IFE_PMC_FORCE_MDIX;
5196 			break;
5197 		case 2:
5198 			reg_data |= IFE_PMC_FORCE_MDIX;
5199 			break;
5200 		case 0:
5201 		default:
5202 			reg_data |= IFE_PMC_AUTO_MDIX;
5203 			break;
5204 		}
5205 		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
5206 						reg_data);
5207 		if (ret_val)
5208 			return ret_val;
5209 		break;
5210 	default:
5211 		break;
5212 	}
5213 
5214 	return e1000_setup_copper_link_generic(hw);
5215 }
5216 
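/*
 * The IFE MDI/MDI-X switch above interprets hw->phy.mdix as 1 = force
 * MDI, 2 = force MDI-X, and anything else (normally 0) = automatic
 * crossover.  A caller forcing MDI-X before link setup might do the
 * following (sketch, driver context assumed):
 *
 *	hw->phy.mdix = 2;
 *	ret_val = hw->mac.ops.setup_physical_interface(hw);
 */
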
5217 /**
5218  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
5219  *  @hw: pointer to the HW structure
5220  *
5221  *  Calls the PHY specific link setup function and then calls the
5222  *  generic setup_copper_link to finish configuring the link for
5223  *  Lynxpoint PCH devices.
5224  **/
5225 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
5226 {
5227 	u32 ctrl;
5228 	s32 ret_val;
5229 
5230 	DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
5231 
5232 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5233 	ctrl |= E1000_CTRL_SLU;
5234 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5235 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5236 
5237 	ret_val = e1000_copper_link_setup_82577(hw);
5238 	if (ret_val)
5239 		return ret_val;
5240 
5241 	return e1000_setup_copper_link_generic(hw);
5242 }
5243 
5244 /**
5245  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
5246  *  @hw: pointer to the HW structure
5247  *  @speed: pointer to store current link speed
5248  *  @duplex: pointer to store the current link duplex
5249  *
5250  *  Calls the generic get_speed_and_duplex to retrieve the current link
5251  *  information and then calls the Kumeran lock loss workaround for links at
5252  *  gigabit speeds.
5253  **/
5254 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
5255 					  u16 *duplex)
5256 {
5257 	s32 ret_val;
5258 
5259 	DEBUGFUNC("e1000_get_link_up_info_ich8lan");
5260 
5261 	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
5262 	if (ret_val)
5263 		return ret_val;
5264 
5265 	if ((hw->mac.type == e1000_ich8lan) &&
5266 	    (hw->phy.type == e1000_phy_igp_3) &&
5267 	    (*speed == SPEED_1000)) {
5268 		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
5269 	}
5270 
5271 	return ret_val;
5272 }
5273 
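/*
 * Usage sketch: on these parts hw->mac.ops.get_link_up_info resolves to
 * the function above (driver context assumed):
 *
 *	u16 speed, duplex;
 *
 *	ret_val = hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
 *	if (!ret_val)
 *		DEBUGOUT2("link %u Mb/s, duplex %u\n", speed, duplex);
 */
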
5274 /**
5275  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
5276  *  @hw: pointer to the HW structure
5277  *
5278  *  Work-around for 82566 Kumeran PCS lock loss:
5279  *  On a link status change (i.e. PCI reset, speed change) while the link
5280  *  is up at gigabit speed:
5281  *    0) if the workaround is disabled, do nothing
5282  *    1) wait 1ms for Kumeran link to come up
5283  *    2) check Kumeran Diagnostic register PCS lock loss bit
5284  *    3) if not set the link is locked (all is good), otherwise...
5285  *    4) reset the PHY
5286  *    5) repeat up to 10 times
5287  *  Note: this is only called for IGP3 copper when speed is 1gb.
5288  **/
5289 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
5290 {
5291 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5292 	u32 phy_ctrl;
5293 	s32 ret_val;
5294 	u16 i, data;
5295 	bool link;
5296 
5297 	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
5298 
5299 	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
5300 		return E1000_SUCCESS;
5301 
5302 	/* Make sure the link is up before proceeding; if it is not, just
5303 	 * return.  Attempting this while the link is still negotiating
5304 	 * has been seen to foul up link stability.
5305 	 */
5306 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
5307 	if (!link)
5308 		return E1000_SUCCESS;
5309 
5310 	for (i = 0; i < 10; i++) {
5311 		/* read once to clear */
5312 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5313 		if (ret_val)
5314 			return ret_val;
5315 		/* and again to get new status */
5316 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5317 		if (ret_val)
5318 			return ret_val;
5319 
5320 		/* check for PCS lock */
5321 		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
5322 			return E1000_SUCCESS;
5323 
5324 		/* Issue PHY reset */
5325 		hw->phy.ops.reset(hw);
5326 		msec_delay_irq(5);
5327 	}
5328 	/* Disable GigE link negotiation */
5329 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5330 	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
5331 		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5332 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5333 
5334 	/* Call gig speed drop workaround on Gig disable before accessing
5335 	 * any PHY registers
5336 	 */
5337 	e1000_gig_downshift_workaround_ich8lan(hw);
5338 
5339 	/* unable to acquire PCS lock */
5340 	return -E1000_ERR_PHY;
5341 }
5342 
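/*
 * The diagnostic register above is read twice on purpose: per the
 * in-line comments, the first read returns and clears the latched
 * status, and the second returns the current state.  The idiom in
 * isolation (sketch):
 *
 *	hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
 *	hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
 *	locked = !(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS);
 */
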
5343 /**
5344  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
5345  *  @hw: pointer to the HW structure
5346  *  @state: boolean value used to set the current Kumeran workaround state
5347  *
5348  *  If ICH8, set the current Kumeran workaround state (enabled - TRUE
5349  *  /disabled - FALSE).
5350  **/
5351 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
5352 						 bool state)
5353 {
5354 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5355 
5356 	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
5357 
5358 	if (hw->mac.type != e1000_ich8lan) {
5359 		DEBUGOUT("Workaround applies to ICH8 only.\n");
5360 		return;
5361 	}
5362 
5363 	dev_spec->kmrn_lock_loss_workaround_enabled = state;
5364 
5365 	return;
5366 }
5367 
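/*
 * Usage sketch: a driver typically enables the workaround once while
 * initializing an ICH8 part (the adapter wrapper shown here is assumed,
 * not part of this file):
 *
 *	e1000_set_kmrn_lock_loss_workaround_ich8lan(&adapter->hw, TRUE);
 */
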
5368 /**
5369  *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
5370  *  @hw: pointer to the HW structure
5371  *
5372  *  Workaround for 82566 power-down on D3 entry:
5373  *    1) disable gigabit link
5374  *    2) write VR power-down enable
5375  *    3) read it back
5376  *  Continue if successful, else issue LCD reset and repeat
5377  **/
5378 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
5379 {
5380 	u32 reg;
5381 	u16 data;
5382 	u8  retry = 0;
5383 
5384 	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
5385 
5386 	if (hw->phy.type != e1000_phy_igp_3)
5387 		return;
5388 
5389 	/* Try the workaround twice (if needed) */
5390 	do {
5391 		/* Disable link */
5392 		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
5393 		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
5394 			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5395 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
5396 
5397 		/* Call gig speed drop workaround on Gig disable before
5398 		 * accessing any PHY registers
5399 		 */
5400 		if (hw->mac.type == e1000_ich8lan)
5401 			e1000_gig_downshift_workaround_ich8lan(hw);
5402 
5403 		/* Write VR power-down enable */
5404 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5405 		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5406 		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
5407 				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);
5408 
5409 		/* Read it back and test */
5410 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5411 		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5412 		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
5413 			break;
5414 
5415 		/* Issue PHY reset and repeat at most one more time */
5416 		reg = E1000_READ_REG(hw, E1000_CTRL);
5417 		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
5418 		retry++;
5419 	} while (retry);
5420 }
5421 
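/*
 * Usage sketch: this workaround is meant to run from the suspend path
 * just before D3 entry; the function itself bails out unless the PHY is
 * IGP3, so an unconditional call is safe (driver context assumed):
 *
 *	e1000_igp3_phy_powerdown_workaround_ich8lan(hw);
 */
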
5422 /**
5423  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
5424  *  @hw: pointer to the HW structure
5425  *
5426  *  Steps to take when dropping from 1 Gb/s (e.g. link cable removal (LSC),
5427  *  LPLU, Gig disable, MDIC PHY reset):
5428  *    1) Set Kumeran Near-end loopback
5429  *    2) Clear Kumeran Near-end loopback
5430  *  Should only be called for ICH8[m] devices with any 1G PHY.
5431  **/
5432 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
5433 {
5434 	s32 ret_val;
5435 	u16 reg_data;
5436 
5437 	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
5438 
5439 	if ((hw->mac.type != e1000_ich8lan) ||
5440 	    (hw->phy.type == e1000_phy_ife))
5441 		return;
5442 
5443 	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5444 					      &reg_data);
5445 	if (ret_val)
5446 		return;
5447 	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
5448 	ret_val = e1000_write_kmrn_reg_generic(hw,
5449 					       E1000_KMRNCTRLSTA_DIAG_OFFSET,
5450 					       reg_data);
5451 	if (ret_val)
5452 		return;
5453 	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
5454 	e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5455 				     reg_data);
5456 }
5457 
5458 /**
5459  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
5460  *  @hw: pointer to the HW structure
5461  *
5462  *  During S0 to Sx transition, it is possible the link remains at gig
5463  *  instead of negotiating to a lower speed.  Before going to Sx, set
5464  *  'Gig Disable' to force link speed negotiation to a lower speed based on
5465  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
5466  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
5467  *  needs to be written.
5468  *  Parts that support (and are linked to a partner which supports) EEE in
5469  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
5470  *  than 10Mbps w/o EEE.
5471  **/
5472 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
5473 {
5474 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5475 	u32 phy_ctrl;
5476 	s32 ret_val;
5477 
5478 	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
5479 
5480 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5481 	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
5482 
5483 	if (hw->phy.type == e1000_phy_i217) {
5484 		u16 phy_reg, device_id = hw->device_id;
5485 
5486 		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
5487 		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
5488 		    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
5489 		    (device_id == E1000_DEV_ID_PCH_I218_V3) ||
5490 		    (hw->mac.type == e1000_pch_spt)) {
5491 			u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
5492 
5493 			E1000_WRITE_REG(hw, E1000_FEXTNVM6,
5494 					fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
5495 		}
5496 
5497 		ret_val = hw->phy.ops.acquire(hw);
5498 		if (ret_val)
5499 			goto out;
5500 
5501 		if (!dev_spec->eee_disable) {
5502 			u16 eee_advert;
5503 
5504 			ret_val =
5505 			    e1000_read_emi_reg_locked(hw,
5506 						      I217_EEE_ADVERTISEMENT,
5507 						      &eee_advert);
5508 			if (ret_val)
5509 				goto release;
5510 
5511 			/* Disable LPLU if both link partners support 100BaseT
5512 			 * EEE and 100Full is advertised on both ends of the
5513 			 * link, and enable Auto Enable LPI since there will
5514 			 * be no driver to enable LPI while in Sx.
5515 			 */
5516 			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
5517 			    (dev_spec->eee_lp_ability &
5518 			     I82579_EEE_100_SUPPORTED) &&
5519 			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
5520 				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
5521 					      E1000_PHY_CTRL_NOND0A_LPLU);
5522 
5523 				/* Set Auto Enable LPI after link up */
5524 				hw->phy.ops.read_reg_locked(hw,
5525 							    I217_LPI_GPIO_CTRL,
5526 							    &phy_reg);
5527 				phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5528 				hw->phy.ops.write_reg_locked(hw,
5529 							     I217_LPI_GPIO_CTRL,
5530 							     phy_reg);
5531 			}
5532 		}
5533 
5534 		/* For i217 Intel Rapid Start Technology support,
5535 		 * when the system is going into Sx and no manageability engine
5536 		 * is present, the driver must configure proxy to reset only on
5537 		 * power good.  LPI (Low Power Idle) state must also reset only
5538 		 * on power good, as well as the MTA (Multicast table array).
5539 		 * The SMBus release must also be disabled on LCD reset.
5540 		 */
5541 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
5542 		      E1000_ICH_FWSM_FW_VALID)) {
5543 			/* Enable proxy to reset only on power good. */
5544 			hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
5545 						    &phy_reg);
5546 			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
5547 			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
5548 						     phy_reg);
5549 
5550 			/* Set the LPI (EEE) enable bit to reset only on
5551 			 * power good.
5552 			 */
5553 			hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
5554 			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
5555 			hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
5556 
5557 			/* Disable the SMB release on LCD reset. */
5558 			hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
5559 			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
5560 			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5561 		}
5562 
5563 		/* Enable MTA to reset for Intel Rapid Start Technology
5564 		 * Support
5565 		 */
5566 		hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
5567 		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
5568 		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5569 
5570 release:
5571 		hw->phy.ops.release(hw);
5572 	}
5573 out:
5574 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5575 
5576 	if (hw->mac.type == e1000_ich8lan)
5577 		e1000_gig_downshift_workaround_ich8lan(hw);
5578 
5579 	if (hw->mac.type >= e1000_pchlan) {
5580 		e1000_oem_bits_config_ich8lan(hw, FALSE);
5581 
5582 		/* Reset PHY to activate OEM bits on 82577/8 */
5583 		if (hw->mac.type == e1000_pchlan)
5584 			e1000_phy_hw_reset_generic(hw);
5585 
5586 		ret_val = hw->phy.ops.acquire(hw);
5587 		if (ret_val)
5588 			return;
5589 		e1000_write_smbus_addr(hw);
5590 		hw->phy.ops.release(hw);
5591 	}
5592 
5593 	return;
5594 }
5595 
5596 /**
5597  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
5598  *  @hw: pointer to the HW structure
5599  *
5600  *  During Sx to S0 transitions on non-managed devices or managed devices
5601  *  on which PHY resets are not blocked, if the PHY registers cannot be
5602  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
5603  *  the PHY.
5604  *  On i217, setup Intel Rapid Start Technology.
5605  **/
5606 void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
5607 {
5608 	s32 ret_val;
5609 
5610 	DEBUGFUNC("e1000_resume_workarounds_pchlan");
5611 
5612 	if (hw->mac.type < e1000_pch2lan)
5613 		return;
5614 
5615 	ret_val = e1000_init_phy_workarounds_pchlan(hw);
5616 	if (ret_val) {
5617 		DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
5618 		return;
5619 	}
5620 
5621 	/* For i217 Intel Rapid Start Technology support, when the system
5622 	 * is transitioning from Sx and no manageability engine is present,
5623 	 * configure SMBus to restore on reset, disable proxy, and enable
5624 	 * the reset on MTA (Multicast table array).
5625 	 */
5626 	if (hw->phy.type == e1000_phy_i217) {
5627 		u16 phy_reg;
5628 
5629 		ret_val = hw->phy.ops.acquire(hw);
5630 		if (ret_val) {
5631 			DEBUGOUT("Failed to setup iRST\n");
5632 			return;
5633 		}
5634 
5635 		/* Clear Auto Enable LPI after link up */
5636 		hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
5637 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5638 		hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
5639 
5640 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
5641 		    E1000_ICH_FWSM_FW_VALID)) {
5642 			/* Restore clear on SMB if no manageability engine
5643 			 * is present
5644 			 */
5645 			ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
5646 							      &phy_reg);
5647 			if (ret_val)
5648 				goto release;
5649 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5650 			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5651 
5652 			/* Disable Proxy */
5653 			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
5654 		}
5655 		/* Enable reset on MTA */
5656 		ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
5657 						      &phy_reg);
5658 		if (ret_val)
5659 			goto release;
5660 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5661 		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5662 release:
5663 		if (ret_val)
5664 			DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
5665 		hw->phy.ops.release(hw);
5666 	}
5667 }
5668 
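/*
 * Usage sketch: these two entry points bracket a power transition; a
 * driver calls the first before entering Sx and the second after
 * returning to S0 (suspend/resume context assumed, not from this file):
 *
 *	e1000_suspend_workarounds_ich8lan(hw);
 *	...
 *	e1000_resume_workarounds_pchlan(hw);
 */
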
5669 /**
5670  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5671  *  @hw: pointer to the HW structure
5672  *
5673  *  Return the LED back to the default configuration.
5674  **/
5675 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5676 {
5677 	DEBUGFUNC("e1000_cleanup_led_ich8lan");
5678 
5679 	if (hw->phy.type == e1000_phy_ife)
5680 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5681 					     0);
5682 
5683 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5684 	return E1000_SUCCESS;
5685 }
5686 
5687 /**
5688  *  e1000_led_on_ich8lan - Turn LEDs on
5689  *  @hw: pointer to the HW structure
5690  *
5691  *  Turn on the LEDs.
5692  **/
5693 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5694 {
5695 	DEBUGFUNC("e1000_led_on_ich8lan");
5696 
5697 	if (hw->phy.type == e1000_phy_ife)
5698 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5699 				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5700 
5701 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5702 	return E1000_SUCCESS;
5703 }
5704 
5705 /**
5706  *  e1000_led_off_ich8lan - Turn LEDs off
5707  *  @hw: pointer to the HW structure
5708  *
5709  *  Turn off the LEDs.
5710  **/
5711 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5712 {
5713 	DEBUGFUNC("e1000_led_off_ich8lan");
5714 
5715 	if (hw->phy.type == e1000_phy_ife)
5716 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5717 			       (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5718 
5719 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5720 	return E1000_SUCCESS;
5721 }
5722 
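/*
 * Usage sketch: the LED handlers above are reached through the mac ops
 * table, e.g. for an identify blink (driver context and delay length
 * are assumptions, not taken from this file):
 *
 *	hw->mac.ops.led_on(hw);
 *	msec_delay(500);
 *	hw->mac.ops.led_off(hw);
 *	hw->mac.ops.cleanup_led(hw);
 */
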
5723 /**
5724  *  e1000_setup_led_pchlan - Configures SW controllable LED
5725  *  @hw: pointer to the HW structure
5726  *
5727  *  This prepares the SW controllable LED for use.
5728  **/
5729 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5730 {
5731 	DEBUGFUNC("e1000_setup_led_pchlan");
5732 
5733 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5734 				     (u16)hw->mac.ledctl_mode1);
5735 }
5736 
5737 /**
5738  *  e1000_cleanup_led_pchlan - Restore the default LED operation
5739  *  @hw: pointer to the HW structure
5740  *
5741  *  Return the LED back to the default configuration.
5742  **/
5743 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5744 {
5745 	DEBUGFUNC("e1000_cleanup_led_pchlan");
5746 
5747 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5748 				     (u16)hw->mac.ledctl_default);
5749 }
5750 
5751 /**
5752  *  e1000_led_on_pchlan - Turn LEDs on
5753  *  @hw: pointer to the HW structure
5754  *
5755  *  Turn on the LEDs.
5756  **/
5757 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5758 {
5759 	u16 data = (u16)hw->mac.ledctl_mode2;
5760 	u32 i, led;
5761 
5762 	DEBUGFUNC("e1000_led_on_pchlan");
5763 
5764 	/* If no link, then turn LED on by setting the invert bit
5765 	 * for each LED whose mode is "link_up" in ledctl_mode2.
5766 	 */
5767 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5768 		for (i = 0; i < 3; i++) {
5769 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5770 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
5771 			    E1000_LEDCTL_MODE_LINK_UP)
5772 				continue;
5773 			if (led & E1000_PHY_LED0_IVRT)
5774 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5775 			else
5776 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
5777 		}
5778 	}
5779 
5780 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5781 }
5782 
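/*
 * HV_LED_CONFIG packs three per-LED configurations into consecutive
 * 5-bit fields, which is what the (i * 5) shifts above walk across.
 * Extracting one field (sketch, with the masks used above):
 *
 *	led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
 *	mode = led & E1000_PHY_LED0_MODE_MASK;
 *	inverted = (led & E1000_PHY_LED0_IVRT) != 0;
 */
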
5783 /**
5784  *  e1000_led_off_pchlan - Turn LEDs off
5785  *  @hw: pointer to the HW structure
5786  *
5787  *  Turn off the LEDs.
5788  **/
5789 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5790 {
5791 	u16 data = (u16)hw->mac.ledctl_mode1;
5792 	u32 i, led;
5793 
5794 	DEBUGFUNC("e1000_led_off_pchlan");
5795 
5796 	/* If no link, then turn LED off by clearing the invert bit
5797 	 * for each LED whose mode is "link_up" in ledctl_mode1.
5798 	 */
5799 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5800 		for (i = 0; i < 3; i++) {
5801 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5802 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
5803 			    E1000_LEDCTL_MODE_LINK_UP)
5804 				continue;
5805 			if (led & E1000_PHY_LED0_IVRT)
5806 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5807 			else
5808 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
5809 		}
5810 	}
5811 
5812 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5813 }
5814 
5815 /**
5816  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5817  *  @hw: pointer to the HW structure
5818  *
5819  *  Read appropriate register for the config done bit for completion status
5820  *  and configure the PHY through s/w for EEPROM-less parts.
5821  *
5822  *  NOTE: some silicon which is EEPROM-less will fail trying to read the
5823  *  config done bit, so only an error is logged and execution continues.
5824  *  If we were to return with an error, EEPROM-less silicon would not be
5825  *  able to be reset or change link.
5826  **/
5827 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5828 {
5829 	s32 ret_val = E1000_SUCCESS;
5830 	u32 bank = 0;
5831 	u32 status;
5832 
5833 	DEBUGFUNC("e1000_get_cfg_done_ich8lan");
5834 
5835 	e1000_get_cfg_done_generic(hw);
5836 
5837 	/* Wait for indication from h/w that it has completed basic config */
5838 	if (hw->mac.type >= e1000_ich10lan) {
5839 		e1000_lan_init_done_ich8lan(hw);
5840 	} else {
5841 		ret_val = e1000_get_auto_rd_done_generic(hw);
5842 		if (ret_val) {
5843 			/* When auto config read does not complete, do not
5844 			 * return with an error. This can happen in situations
5845 			 * where there is no eeprom and prevents getting link.
5846 			 */
5847 			DEBUGOUT("Auto Read Done did not complete\n");
5848 			ret_val = E1000_SUCCESS;
5849 		}
5850 	}
5851 
5852 	/* Clear PHY Reset Asserted bit */
5853 	status = E1000_READ_REG(hw, E1000_STATUS);
5854 	if (status & E1000_STATUS_PHYRA)
5855 		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
5856 	else
5857 		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
5858 
5859 	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
5860 	if (hw->mac.type <= e1000_ich9lan) {
5861 		if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
5862 		    (hw->phy.type == e1000_phy_igp_3)) {
5863 			e1000_phy_init_script_igp3(hw);
5864 		}
5865 	} else {
5866 		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
5867 			/* Maybe we should do a basic PHY config */
5868 			DEBUGOUT("EEPROM not present\n");
5869 			ret_val = -E1000_ERR_CONFIG;
5870 		}
5871 	}
5872 
5873 	return ret_val;
5874 }
5875 
5876 /**
5877  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
5878  * @hw: pointer to the HW structure
5879  *
5880  * In the case of a PHY power down to save power, to turn off link during a
5881  * driver unload, or when wake on LAN is not enabled, remove the link.
5882  **/
5883 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
5884 {
5885 	/* If the management interface is not enabled, then power down */
5886 	if (!(hw->mac.ops.check_mng_mode(hw) ||
5887 	      hw->phy.ops.check_reset_block(hw)))
5888 		e1000_power_down_phy_copper(hw);
5889 
5890 	return;
5891 }
5892 
5893 /**
5894  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
5895  *  @hw: pointer to the HW structure
5896  *
5897  *  Clears hardware counters specific to the silicon family and calls
5898  *  clear_hw_cntrs_generic to clear all general purpose counters.
5899  **/
5900 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
5901 {
5902 	u16 phy_data;
5903 	s32 ret_val;
5904 
5905 	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
5906 
5907 	e1000_clear_hw_cntrs_base_generic(hw);
5908 
5909 	E1000_READ_REG(hw, E1000_ALGNERRC);
5910 	E1000_READ_REG(hw, E1000_RXERRC);
5911 	E1000_READ_REG(hw, E1000_TNCRS);
5912 	E1000_READ_REG(hw, E1000_CEXTERR);
5913 	E1000_READ_REG(hw, E1000_TSCTC);
5914 	E1000_READ_REG(hw, E1000_TSCTFC);
5915 
5916 	E1000_READ_REG(hw, E1000_MGTPRC);
5917 	E1000_READ_REG(hw, E1000_MGTPDC);
5918 	E1000_READ_REG(hw, E1000_MGTPTC);
5919 
5920 	E1000_READ_REG(hw, E1000_IAC);
5921 	E1000_READ_REG(hw, E1000_ICRXOC);
5922 
5923 	/* Clear PHY statistics registers */
5924 	if ((hw->phy.type == e1000_phy_82578) ||
5925 	    (hw->phy.type == e1000_phy_82579) ||
5926 	    (hw->phy.type == e1000_phy_i217) ||
5927 	    (hw->phy.type == e1000_phy_82577)) {
5928 		ret_val = hw->phy.ops.acquire(hw);
5929 		if (ret_val)
5930 			return;
5931 		ret_val = hw->phy.ops.set_page(hw,
5932 					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
5933 		if (ret_val)
5934 			goto release;
5935 		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
5936 		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
5937 		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
5938 		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
5939 		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
5940 		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
5941 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
5942 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
5943 		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
5944 		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
5945 		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
5946 		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
5947 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
5948 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
5949 release:
5950 		hw->phy.ops.release(hw);
5951 	}
5952 }
5953 
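/*
 * The PHY statistics reads above come in UPPER/LOWER pairs because each
 * counter spans two 16-bit PHY registers; since the counters are
 * clear-on-read, reading both halves is enough to zero them, and the
 * returned values are deliberately discarded.  Clearing a single
 * counter in isolation (sketch):
 *
 *	hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
 *	hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
 */
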
5954