xref: /dragonfly/sys/dev/netif/ig_hal/e1000_ich8lan.c (revision 277350a0)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2014, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD:$*/
34 
35 /* 82562G 10/100 Network Connection
36  * 82562G-2 10/100 Network Connection
37  * 82562GT 10/100 Network Connection
38  * 82562GT-2 10/100 Network Connection
39  * 82562V 10/100 Network Connection
40  * 82562V-2 10/100 Network Connection
41  * 82566DC-2 Gigabit Network Connection
42  * 82566DC Gigabit Network Connection
43  * 82566DM-2 Gigabit Network Connection
44  * 82566DM Gigabit Network Connection
45  * 82566MC Gigabit Network Connection
46  * 82566MM Gigabit Network Connection
47  * 82567LM Gigabit Network Connection
48  * 82567LF Gigabit Network Connection
49  * 82567V Gigabit Network Connection
50  * 82567LM-2 Gigabit Network Connection
51  * 82567LF-2 Gigabit Network Connection
52  * 82567V-2 Gigabit Network Connection
53  * 82567LF-3 Gigabit Network Connection
54  * 82567LM-3 Gigabit Network Connection
55  * 82567LM-4 Gigabit Network Connection
56  * 82577LM Gigabit Network Connection
57  * 82577LC Gigabit Network Connection
58  * 82578DM Gigabit Network Connection
59  * 82578DC Gigabit Network Connection
60  * 82579LM Gigabit Network Connection
61  * 82579V Gigabit Network Connection
62  * Ethernet Connection I217-LM
63  * Ethernet Connection I217-V
64  * Ethernet Connection I218-V
65  * Ethernet Connection I218-LM
66  * Ethernet Connection (2) I218-LM
67  * Ethernet Connection (2) I218-V
68  * Ethernet Connection (3) I218-LM
69  * Ethernet Connection (3) I218-V
70  */
71 
72 #include "e1000_api.h"
73 
74 static s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
75 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
76 static s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
77 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
78 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
79 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
80 static int  e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
81 static int  e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
82 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
83 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
84 					      u8 *mc_addr_list,
85 					      u32 mc_addr_count);
86 static s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
87 static s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
88 static s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
89 static s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
90 					    bool active);
91 static s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
92 					    bool active);
93 static s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
94 				   u16 words, u16 *data);
95 static s32  e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset,
96 				   u16 words, u16 *data);
97 static s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
98 				    u16 words, u16 *data);
99 static s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
100 static s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
101 static s32  e1000_update_nvm_checksum_spt(struct e1000_hw *hw);
102 static s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
103 					    u16 *data);
104 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
105 static s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
106 static s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
107 static s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
108 static s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
109 static s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
110 static s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
111 static s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
112 					   u16 *speed, u16 *duplex);
113 static s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
114 static s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
115 static s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
116 static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
117 static s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
118 static s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
119 static s32  e1000_led_on_pchlan(struct e1000_hw *hw);
120 static s32  e1000_led_off_pchlan(struct e1000_hw *hw);
121 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
122 static s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
123 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
124 static s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
125 static s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
126 					  u32 offset, u8 *data);
127 static s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
128 					  u8 size, u16 *data);
129 static s32  e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
130 					  u32 *data);
131 static s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
132 					  u32 offset, u16 *data);
133 static s32  e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
134 					  u32 offset, u32 *data);
135 static s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
136 						 u32 offset, u8 byte);
137 static s32  e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
138 						 u32 offset, u32 dword);
139 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
140 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
141 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
142 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
143 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
144 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
145 static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr);
146 
147 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
148 /* Offset 04h HSFSTS */
149 union ich8_hws_flash_status {
150 	struct ich8_hsfsts {
151 		u16 flcdone:1; /* bit 0 Flash Cycle Done */
152 		u16 flcerr:1; /* bit 1 Flash Cycle Error */
153 		u16 dael:1; /* bit 2 Direct Access Error Log */
154 		u16 berasesz:2; /* bit 4:3 Sector Erase Size */
155 		u16 flcinprog:1; /* bit 5 flash cycle in Progress */
156 		u16 reserved1:2; /* bit 7:6 Reserved */
157 		u16 reserved2:6; /* bit 13:8 Reserved */
158 		u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
159 		u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
160 	} hsf_status;
161 	u16 regval;
162 };
163 
164 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
165 /* Offset 06h FLCTL */
166 union ich8_hws_flash_ctrl {
167 	struct ich8_hsflctl {
168 		u16 flcgo:1;   /* 0 Flash Cycle Go */
169 		u16 flcycle:2;   /* 2:1 Flash Cycle */
170 		u16 reserved:5;   /* 7:3 Reserved  */
171 		u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
172 		u16 flockdn:6;   /* 15:10 Reserved */
173 	} hsf_ctrl;
174 	u16 regval;
175 };
176 
177 /* ICH Flash Region Access Permissions */
178 union ich8_hws_flash_regacc {
179 	struct ich8_flracc {
180 		u32 grra:8; /* 0:7 GbE region Read Access */
181 		u32 grwa:8; /* 8:15 GbE region Write Access */
182 		u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
183 		u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
184 	} hsf_flregacc;
185 	u32 regval; /* u32 to cover all 32 bits of the bitfields above */
186 };
187 
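/* Usage sketch (illustrative only, not driver code): these unions let a
 * flash register be transferred as a raw value through regval while the
 * individual fields are decoded through the bitfields.
 * E1000_READ_FLASH_REG16/E1000_WRITE_FLASH_REG16 and ICH_FLASH_HSFSTS are
 * the 16-bit accessors and register offset this family of parts uses:
 *
 *	union ich8_hws_flash_status hsfsts;
 *
 *	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
 *	if (hsfsts.hsf_status.flcinprog) {
 *		;	(* a flash cycle is still in progress; poll or bail *)
 *	}
 *	(* the error/done bits are sticky; write 1 to clear them *)
 *	hsfsts.hsf_status.flcerr = 1;
 *	hsfsts.hsf_status.flcdone = 1;
 *	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
 */
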
188 /**
189  *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
190  *  @hw: pointer to the HW structure
191  *
192  *  Test access to the PHY registers by reading the PHY ID registers.  If
193  *  the PHY ID is already known (e.g. resume path) compare it with known ID,
194  *  otherwise assume the read PHY ID is correct if it is valid.
195  *
196  *  Assumes the sw/fw/hw semaphore is already acquired.
197  **/
198 static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
199 {
200 	u16 phy_reg = 0;
201 	u32 phy_id = 0;
202 	s32 ret_val = 0;
203 	u16 retry_count;
204 	u32 mac_reg = 0;
205 
206 	for (retry_count = 0; retry_count < 2; retry_count++) {
207 		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
208 		if (ret_val || (phy_reg == 0xFFFF))
209 			continue;
210 		phy_id = (u32)(phy_reg << 16);
211 
212 		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
213 		if (ret_val || (phy_reg == 0xFFFF)) {
214 			phy_id = 0;
215 			continue;
216 		}
217 		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
218 		break;
219 	}
220 
221 	if (hw->phy.id) {
222 		if (hw->phy.id == phy_id)
223 			goto out;
224 	} else if (phy_id) {
225 		hw->phy.id = phy_id;
226 		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
227 		goto out;
228 	}
229 
230 	/* In case the PHY needs to be in mdio slow mode,
231 	 * set slow mode and try to get the PHY id again.
232 	 */
233 	if (hw->mac.type < e1000_pch_lpt) {
234 		hw->phy.ops.release(hw);
235 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
236 		if (!ret_val)
237 			ret_val = e1000_get_phy_id(hw);
238 		hw->phy.ops.acquire(hw);
239 	}
240 
241 	if (ret_val)
242 		return FALSE;
243 out:
244 	if (hw->mac.type == e1000_pch_lpt ||
245 	    hw->mac.type == e1000_pch_spt) {
246 		/* Unforce SMBus mode in PHY */
247 		hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
248 		phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
249 		hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
250 
251 		/* Unforce SMBus mode in MAC */
252 		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
253 		mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
254 		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
255 	}
256 
257 	return TRUE;
258 }
259 
260 /**
261  *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
262  *  @hw: pointer to the HW structure
263  *
264  *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
265  *  used to reset the PHY to a quiescent state when necessary.
266  **/
267 static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
268 {
269 	u32 mac_reg;
270 
271 	DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");
272 
273 	/* Set Phy Config Counter to 50msec */
274 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
275 	mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
276 	mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
277 	E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
278 
279 	/* Toggle LANPHYPC Value bit */
280 	mac_reg = E1000_READ_REG(hw, E1000_CTRL);
281 	mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
282 	mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
283 	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
284 	E1000_WRITE_FLUSH(hw);
285 	usec_delay(10);
286 	mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
287 	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
288 	E1000_WRITE_FLUSH(hw);
289 
290 	if (hw->mac.type < e1000_pch_lpt) {
291 		msec_delay(50);
292 	} else {
293 		u16 count = 20;
294 
295 		do {
296 			msec_delay(5);
297 		} while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
298 			   E1000_CTRL_EXT_LPCD) && count--);
299 
300 		msec_delay(30);
301 	}
302 }
303 
304 /**
305  *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
306  *  @hw: pointer to the HW structure
307  *
308  *  Workarounds/flow necessary for PHY initialization during driver load
309  *  and resume paths.
310  **/
311 static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
312 {
313 	u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
314 	s32 ret_val;
315 
316 	DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
317 
318 	/* Gate automatic PHY configuration by hardware on managed and
319 	 * non-managed 82579 and newer adapters.
320 	 */
321 	e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
322 
323 	/* It is not possible to be certain of the current state of ULP
324 	 * so forcibly disable it.
325 	 */
326 	hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
327 	e1000_disable_ulp_lpt_lp(hw, TRUE);
328 
329 	ret_val = hw->phy.ops.acquire(hw);
330 	if (ret_val) {
331 		DEBUGOUT("Failed to initialize PHY flow\n");
332 		goto out;
333 	}
334 
335 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
336 	 * inaccessible and resetting the PHY is not blocked, toggle the
337 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
338 	 */
339 	switch (hw->mac.type) {
340 	case e1000_pch_lpt:
341 	case e1000_pch_spt:
342 		if (e1000_phy_is_accessible_pchlan(hw))
343 			break;
344 
345 		/* Before toggling LANPHYPC, see if PHY is accessible by
346 		 * forcing MAC to SMBus mode first.
347 		 */
348 		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
349 		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
350 		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
351 
352 		/* Wait 50 milliseconds for the MAC to finish any retries
353 		 * it might be performing from previous attempts to
354 		 * acknowledge PHY read requests.
355 		 */
356 		msec_delay(50);
357 
358 		/* fall-through */
359 	case e1000_pch2lan:
360 		if (e1000_phy_is_accessible_pchlan(hw))
361 			break;
362 
363 		/* fall-through */
364 	case e1000_pchlan:
365 		if ((hw->mac.type == e1000_pchlan) &&
366 		    (fwsm & E1000_ICH_FWSM_FW_VALID))
367 			break;
368 
369 		if (hw->phy.ops.check_reset_block(hw)) {
370 			DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
371 			ret_val = -E1000_ERR_PHY;
372 			break;
373 		}
374 
375 		/* Toggle LANPHYPC Value bit */
376 		e1000_toggle_lanphypc_pch_lpt(hw);
377 		if (hw->mac.type >= e1000_pch_lpt) {
378 			if (e1000_phy_is_accessible_pchlan(hw))
379 				break;
380 
381 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
382 			 * so ensure that the MAC is also out of SMBus mode
383 			 */
384 			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
385 			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
386 			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
387 
388 			if (e1000_phy_is_accessible_pchlan(hw))
389 				break;
390 
391 			ret_val = -E1000_ERR_PHY;
392 		}
393 		break;
394 	default:
395 		break;
396 	}
397 
398 	hw->phy.ops.release(hw);
399 	if (!ret_val) {
400 
401 		/* Check to see if able to reset PHY.  Print error if not */
402 		if (hw->phy.ops.check_reset_block(hw)) {
403 			ERROR_REPORT("Reset blocked by ME\n");
404 			goto out;
405 		}
406 
407 		/* Reset the PHY before any access to it.  Doing so, ensures
408 		 * that the PHY is in a known good state before we read/write
409 		 * PHY registers.  The generic reset is sufficient here,
410 		 * because we haven't determined the PHY type yet.
411 		 */
412 		ret_val = e1000_phy_hw_reset_generic(hw);
413 		if (ret_val)
414 			goto out;
415 
416 		/* On a successful reset, possibly need to wait for the PHY
417 		 * to quiesce to an accessible state before returning control
418 		 * to the calling function.  If the PHY does not quiesce, then
419 		 * return E1000_BLK_PHY_RESET, as this is the condition that
420 		 * the PHY is in.
421 		 */
422 		ret_val = hw->phy.ops.check_reset_block(hw);
423 		if (ret_val)
424 			ERROR_REPORT("ME blocked access to PHY after reset\n");
425 	}
426 
427 out:
428 	/* Ungate automatic PHY configuration on non-managed 82579 */
429 	if ((hw->mac.type == e1000_pch2lan) &&
430 	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
431 		msec_delay(10);
432 		e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
433 	}
434 
435 	return ret_val;
436 }
437 
438 /**
439  *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
440  *  @hw: pointer to the HW structure
441  *
442  *  Initialize family-specific PHY parameters and function pointers.
443  **/
444 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
445 {
446 	struct e1000_phy_info *phy = &hw->phy;
447 	s32 ret_val;
448 
449 	DEBUGFUNC("e1000_init_phy_params_pchlan");
450 
451 	phy->addr		= 1;
452 	phy->reset_delay_us	= 100;
453 
454 	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
455 	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
456 	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
457 	phy->ops.set_page	= e1000_set_page_igp;
458 	phy->ops.read_reg	= e1000_read_phy_reg_hv;
459 	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
460 	phy->ops.read_reg_page	= e1000_read_phy_reg_page_hv;
461 	phy->ops.release	= e1000_release_swflag_ich8lan;
462 	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
463 	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
464 	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
465 	phy->ops.write_reg	= e1000_write_phy_reg_hv;
466 	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
467 	phy->ops.write_reg_page	= e1000_write_phy_reg_page_hv;
468 	phy->ops.power_up	= e1000_power_up_phy_copper;
469 	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;
470 	phy->autoneg_mask	= AUTONEG_ADVERTISE_SPEED_DEFAULT;
471 
472 	phy->id = e1000_phy_unknown;
473 
474 	ret_val = e1000_init_phy_workarounds_pchlan(hw);
475 	if (ret_val)
476 		return ret_val;
477 
478 	if (phy->id == e1000_phy_unknown)
479 		switch (hw->mac.type) {
480 		default:
481 			ret_val = e1000_get_phy_id(hw);
482 			if (ret_val)
483 				return ret_val;
484 			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
485 				break;
486 			/* fall-through */
487 		case e1000_pch2lan:
488 		case e1000_pch_lpt:
489 		case e1000_pch_spt:
490 			/* In case the PHY needs to be in mdio slow mode,
491 			 * set slow mode and try to get the PHY id again.
492 			 */
493 			ret_val = e1000_set_mdio_slow_mode_hv(hw);
494 			if (ret_val)
495 				return ret_val;
496 			ret_val = e1000_get_phy_id(hw);
497 			if (ret_val)
498 				return ret_val;
499 			break;
500 		}
501 	phy->type = e1000_get_phy_type_from_id(phy->id);
502 
503 	switch (phy->type) {
504 	case e1000_phy_82577:
505 	case e1000_phy_82579:
506 	case e1000_phy_i217:
507 		phy->ops.check_polarity = e1000_check_polarity_82577;
508 		phy->ops.force_speed_duplex =
509 			e1000_phy_force_speed_duplex_82577;
510 		phy->ops.get_cable_length = e1000_get_cable_length_82577;
511 		phy->ops.get_info = e1000_get_phy_info_82577;
512 		phy->ops.commit = e1000_phy_sw_reset_generic;
513 		break;
514 	case e1000_phy_82578:
515 		phy->ops.check_polarity = e1000_check_polarity_m88;
516 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
517 		phy->ops.get_cable_length = e1000_get_cable_length_m88;
518 		phy->ops.get_info = e1000_get_phy_info_m88;
519 		break;
520 	default:
521 		ret_val = -E1000_ERR_PHY;
522 		break;
523 	}
524 
525 	return ret_val;
526 }
527 
528 /**
529  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
530  *  @hw: pointer to the HW structure
531  *
532  *  Initialize family-specific PHY parameters and function pointers.
533  **/
534 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
535 {
536 	struct e1000_phy_info *phy = &hw->phy;
537 	s32 ret_val;
538 	u16 i = 0;
539 
540 	DEBUGFUNC("e1000_init_phy_params_ich8lan");
541 
542 	phy->addr		= 1;
543 	phy->reset_delay_us	= 100;
544 
545 	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
546 	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
547 	phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
548 	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
549 	phy->ops.read_reg	= e1000_read_phy_reg_igp;
550 	phy->ops.release	= e1000_release_swflag_ich8lan;
551 	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
552 	phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
553 	phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
554 	phy->ops.write_reg	= e1000_write_phy_reg_igp;
555 	phy->ops.power_up	= e1000_power_up_phy_copper;
556 	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;
557 
558 	/* We may need to do this twice - once for IGP and if that fails,
559 	 * we'll set BM func pointers and try again
560 	 */
561 	ret_val = e1000_determine_phy_address(hw);
562 	if (ret_val) {
563 		phy->ops.write_reg = e1000_write_phy_reg_bm;
564 		phy->ops.read_reg  = e1000_read_phy_reg_bm;
565 		ret_val = e1000_determine_phy_address(hw);
566 		if (ret_val) {
567 			DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
568 			return ret_val;
569 		}
570 	}
571 
572 	phy->id = 0;
573 	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
574 	       (i++ < 100)) {
575 		msec_delay(1);
576 		ret_val = e1000_get_phy_id(hw);
577 		if (ret_val)
578 			return ret_val;
579 	}
580 
581 	/* Verify phy id */
582 	switch (phy->id) {
583 	case IGP03E1000_E_PHY_ID:
584 		phy->type = e1000_phy_igp_3;
585 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
586 		phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
587 		phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
588 		phy->ops.get_info = e1000_get_phy_info_igp;
589 		phy->ops.check_polarity = e1000_check_polarity_igp;
590 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
591 		break;
592 	case IFE_E_PHY_ID:
593 	case IFE_PLUS_E_PHY_ID:
594 	case IFE_C_E_PHY_ID:
595 		phy->type = e1000_phy_ife;
596 		phy->autoneg_mask = E1000_ALL_NOT_GIG;
597 		phy->ops.get_info = e1000_get_phy_info_ife;
598 		phy->ops.check_polarity = e1000_check_polarity_ife;
599 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
600 		break;
601 	case BME1000_E_PHY_ID:
602 		phy->type = e1000_phy_bm;
603 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
604 		phy->ops.read_reg = e1000_read_phy_reg_bm;
605 		phy->ops.write_reg = e1000_write_phy_reg_bm;
606 		phy->ops.commit = e1000_phy_sw_reset_generic;
607 		phy->ops.get_info = e1000_get_phy_info_m88;
608 		phy->ops.check_polarity = e1000_check_polarity_m88;
609 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
610 		break;
611 	default:
612 		return -E1000_ERR_PHY;
613 		break;
614 	}
615 
616 	return E1000_SUCCESS;
617 }
618 
619 /**
620  *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
621  *  @hw: pointer to the HW structure
622  *
623  *  Initialize family-specific NVM parameters and function
624  *  pointers.
625  **/
626 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
627 {
628 	struct e1000_nvm_info *nvm = &hw->nvm;
629 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
630 	u32 gfpreg, sector_base_addr, sector_end_addr;
631 	u16 i;
632 	u32 nvm_size;
633 
634 	DEBUGFUNC("e1000_init_nvm_params_ich8lan");
635 
636 	/* Can't read flash registers if the register set isn't mapped. */
637 	nvm->type = e1000_nvm_flash_sw;
638 
639 	/* XXX turn flash_address into flash_reg_off or something more appropriate */
640 #define E1000_FLASH_BASE_ADDR	0xE000  /* offset of NVM access regs */
641 #define NVM_SIZE_MULTIPLIER	4096
642 
643 	if (hw->mac.type == e1000_pch_spt) {
644 		/*
645 		 * In SPT the flash is in the GbE flash region of the
646 		 * main hw map.  GFPREG does not exist.  Take NVM size from
647 		 * the STRAP register.
648 		 */
649 		nvm->flash_base_addr = 0;
650 		nvm_size = (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
651 			   * NVM_SIZE_MULTIPLIER;
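		/* e.g. a strap field of 7 encodes (7 + 1) * 4096 = 32 KB of
		 * NVM, i.e. two 16 KB banks of 8192 words each after the
		 * adjustments below (illustrative numbers).
		 */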
652 		nvm->flash_bank_size = nvm_size / 2;
653 		/* Adjust to word count */
654 		nvm->flash_bank_size /= sizeof(u16);
655 		/* Set the base address for flash register access */
656 		hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
657 	} else {
658 		if (!hw->flash_address) {
659 			DEBUGOUT("ERROR: Flash registers not mapped\n");
660 			return -E1000_ERR_CONFIG;
661 		}
662 
663 		gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
664 
665 		/* sector_X_addr is a "sector"-aligned address (4096 bytes)
666 		 * Add 1 to sector_end_addr since this sector is included in
667 		 * the overall size.
668 		 */
669 		sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
670 		sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
671 
672 		/* flash_base_addr is byte-aligned */
673 		nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
674 
675 		/* find total size of the NVM, then cut in half since the total
676 		 * size represents two separate NVM banks.
677 		 */
678 		nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
679 					<< FLASH_SECTOR_ADDR_SHIFT);
680 		nvm->flash_bank_size /= 2;
681 		/* Adjust to word count */
682 		nvm->flash_bank_size /= sizeof(u16);
683 	}
684 
685 	nvm->word_size = E1000_SHADOW_RAM_WORDS;
686 
687 	/* Clear shadow ram */
688 	for (i = 0; i < nvm->word_size; i++) {
689 		dev_spec->shadow_ram[i].modified = FALSE;
690 		dev_spec->shadow_ram[i].value    = 0xFFFF;
691 	}
692 
693 	/* Function Pointers */
694 	nvm->ops.acquire	= e1000_acquire_nvm_ich8lan;
695 	nvm->ops.release	= e1000_release_nvm_ich8lan;
696 	if (hw->mac.type == e1000_pch_spt) {
697 		nvm->ops.read		= e1000_read_nvm_spt;
698 		nvm->ops.update		= e1000_update_nvm_checksum_spt;
699 	} else {
700 		nvm->ops.read		= e1000_read_nvm_ich8lan;
701 		nvm->ops.update		= e1000_update_nvm_checksum_ich8lan;
702 	}
703 	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
704 	nvm->ops.validate	= e1000_validate_nvm_checksum_ich8lan;
705 	nvm->ops.write		= e1000_write_nvm_ich8lan;
706 
707 	return E1000_SUCCESS;
708 }
709 
710 /**
711  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
712  *  @hw: pointer to the HW structure
713  *
714  *  Initialize family-specific MAC parameters and function
715  *  pointers.
716  **/
717 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
718 {
719 	struct e1000_mac_info *mac = &hw->mac;
720 	u16 pci_cfg;
721 
722 	DEBUGFUNC("e1000_init_mac_params_ich8lan");
723 
724 	/* Set media type function pointer */
725 	hw->phy.media_type = e1000_media_type_copper;
726 
727 	/* Set mta register count */
728 	mac->mta_reg_count = 32;
729 	/* Set rar entry count */
730 	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
731 	if (mac->type == e1000_ich8lan)
732 		mac->rar_entry_count--;
733 	/* Set if part includes ASF firmware */
734 	mac->asf_firmware_present = TRUE;
735 	/* FWSM register */
736 	mac->has_fwsm = TRUE;
737 	/* ARC subsystem not supported */
738 	mac->arc_subsystem_valid = FALSE;
739 	/* Adaptive IFS supported */
740 	mac->adaptive_ifs = TRUE;
741 
742 	/* Function pointers */
743 
744 	/* bus type/speed/width */
745 	mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
746 	/* function id */
747 	mac->ops.set_lan_id = e1000_set_lan_id_single_port;
748 	/* reset */
749 	mac->ops.reset_hw = e1000_reset_hw_ich8lan;
750 	/* hw initialization */
751 	mac->ops.init_hw = e1000_init_hw_ich8lan;
752 	/* link setup */
753 	mac->ops.setup_link = e1000_setup_link_ich8lan;
754 	/* physical interface setup */
755 	mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
756 	/* check for link */
757 	mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
758 	/* link info */
759 	mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
760 	/* multicast address update */
761 	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
762 	/* clear hardware counters */
763 	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
764 
765 	/* LED and other operations */
766 	switch (mac->type) {
767 	case e1000_ich8lan:
768 	case e1000_ich9lan:
769 	case e1000_ich10lan:
770 		/* check management mode */
771 		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
772 		/* ID LED init */
773 		mac->ops.id_led_init = e1000_id_led_init_generic;
774 		/* blink LED */
775 		mac->ops.blink_led = e1000_blink_led_generic;
776 		/* setup LED */
777 		mac->ops.setup_led = e1000_setup_led_generic;
778 		/* cleanup LED */
779 		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
780 		/* turn on/off LED */
781 		mac->ops.led_on = e1000_led_on_ich8lan;
782 		mac->ops.led_off = e1000_led_off_ich8lan;
783 		break;
784 	case e1000_pch2lan:
785 		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
786 		mac->ops.rar_set = e1000_rar_set_pch2lan;
787 		/* fall-through */
788 	case e1000_pch_lpt:
789 	case e1000_pch_spt:
790 		/* multicast address update for pch2 */
791 		mac->ops.update_mc_addr_list =
792 			e1000_update_mc_addr_list_pch2lan;
		/* fall-through */
793 	case e1000_pchlan:
794 		/* save PCH revision_id */
795 		e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
796 		hw->revision_id = (u8)(pci_cfg & 0x000F);
797 		/* check management mode */
798 		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
799 		/* ID LED init */
800 		mac->ops.id_led_init = e1000_id_led_init_pchlan;
801 		/* setup LED */
802 		mac->ops.setup_led = e1000_setup_led_pchlan;
803 		/* cleanup LED */
804 		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
805 		/* turn on/off LED */
806 		mac->ops.led_on = e1000_led_on_pchlan;
807 		mac->ops.led_off = e1000_led_off_pchlan;
808 		break;
809 	default:
810 		break;
811 	}
812 
813 	if (mac->type == e1000_pch_lpt ||
814 	    mac->type == e1000_pch_spt) {
815 		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
816 		mac->ops.rar_set = e1000_rar_set_pch_lpt;
817 		mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
818 		mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt;
819 	}
820 
821 	/* Enable PCS Lock-loss workaround for ICH8 */
822 	if (mac->type == e1000_ich8lan)
823 		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
824 
825 	return E1000_SUCCESS;
826 }
827 
828 /**
829  *  __e1000_access_emi_reg_locked - Read/write EMI register
830  *  @hw: pointer to the HW structure
831  *  @address: EMI address to program
832  *  @data: pointer to value to read/write from/to the EMI address
833  *  @read: boolean flag to indicate read or write
834  *
835  *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
836  **/
837 static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
838 					 u16 *data, bool read)
839 {
840 	s32 ret_val;
841 
842 	DEBUGFUNC("__e1000_access_emi_reg_locked");
843 
844 	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
845 	if (ret_val)
846 		return ret_val;
847 
848 	if (read)
849 		ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
850 						      data);
851 	else
852 		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
853 						       *data);
854 
855 	return ret_val;
856 }
857 
858 /**
859  *  e1000_read_emi_reg_locked - Read Extended Management Interface register
860  *  @hw: pointer to the HW structure
861  *  @addr: EMI address to program
862  *  @data: value to be read from the EMI address
863  *
864  *  Assumes the SW/FW/HW Semaphore is already acquired.
865  **/
866 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
867 {
868 	DEBUGFUNC("e1000_read_emi_reg_locked");
869 
870 	return __e1000_access_emi_reg_locked(hw, addr, data, TRUE);
871 }
872 
873 /**
874  *  e1000_write_emi_reg_locked - Write Extended Management Interface register
875  *  @hw: pointer to the HW structure
876  *  @addr: EMI address to program
877  *  @data: value to be written to the EMI address
878  *
879  *  Assumes the SW/FW/HW Semaphore is already acquired.
880  **/
881 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
882 {
883 	DEBUGFUNC("e1000_write_emi_reg_locked");
884 
885 	return __e1000_access_emi_reg_locked(hw, addr, &data, FALSE);
886 }
887 
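/* Calling-pattern sketch (illustrative): the "_locked" suffix means the
 * caller must hold the SW/FW/HW semaphore around the whole sequence, e.g.:
 *
 *	ret_val = hw->phy.ops.acquire(hw);
 *	if (ret_val)
 *		return ret_val;
 *	ret_val = e1000_read_emi_reg_locked(hw, I82579_EEE_PCS_STATUS, &data);
 *	hw->phy.ops.release(hw);
 *
 * e1000_set_eee_pchlan() below follows exactly this pattern.
 */
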
888 /**
889  *  e1000_set_eee_pchlan - Enable/disable EEE support
890  *  @hw: pointer to the HW structure
891  *
892  *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
893  *  the link and the EEE capabilities of the link partner.  The LPI Control
894  *  register bits will remain set only if/when link is up.
895  *
896  *  EEE LPI must not be asserted earlier than one second after link is up.
897  *  On 82579, EEE LPI should not be enabled until such time otherwise there
898  *  can be link issues with some switches.  Other devices can have EEE LPI
899  *  enabled immediately upon link up since they have a timer in hardware which
900  *  prevents LPI from being asserted too early.
901  **/
902 s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
903 {
904 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
905 	s32 ret_val;
906 	u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
907 
908 	DEBUGFUNC("e1000_set_eee_pchlan");
909 
910 	switch (hw->phy.type) {
911 	case e1000_phy_82579:
912 		lpa = I82579_EEE_LP_ABILITY;
913 		pcs_status = I82579_EEE_PCS_STATUS;
914 		adv_addr = I82579_EEE_ADVERTISEMENT;
915 		break;
916 	case e1000_phy_i217:
917 		lpa = I217_EEE_LP_ABILITY;
918 		pcs_status = I217_EEE_PCS_STATUS;
919 		adv_addr = I217_EEE_ADVERTISEMENT;
920 		break;
921 	default:
922 		return E1000_SUCCESS;
923 	}
924 
925 	ret_val = hw->phy.ops.acquire(hw);
926 	if (ret_val)
927 		return ret_val;
928 
929 	ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
930 	if (ret_val)
931 		goto release;
932 
933 	/* Clear bits that enable EEE in various speeds */
934 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
935 
936 	/* Enable EEE if not disabled by user */
937 	if (!dev_spec->eee_disable) {
938 		/* Save off link partner's EEE ability */
939 		ret_val = e1000_read_emi_reg_locked(hw, lpa,
940 						    &dev_spec->eee_lp_ability);
941 		if (ret_val)
942 			goto release;
943 
944 		/* Read EEE advertisement */
945 		ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
946 		if (ret_val)
947 			goto release;
948 
949 		/* Enable EEE only for speeds in which the link partner is
950 		 * EEE capable and for which we advertise EEE.
951 		 */
952 		if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
953 			lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
954 
955 		if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
956 			hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
957 			if (data & NWAY_LPAR_100TX_FD_CAPS)
958 				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
959 			else
960 				/* EEE is not supported in 100Half, so ignore
961 				 * partner's EEE in 100 ability if full-duplex
962 				 * is not advertised.
963 				 */
964 				dev_spec->eee_lp_ability &=
965 				    ~I82579_EEE_100_SUPPORTED;
966 		}
967 	}
968 
969 	if (hw->phy.type == e1000_phy_82579) {
970 		ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
971 						    &data);
972 		if (ret_val)
973 			goto release;
974 
975 		data &= ~I82579_LPI_100_PLL_SHUT;
976 		ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
977 						     data);
978 	}
979 
980 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
981 	ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
982 	if (ret_val)
983 		goto release;
984 
985 	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
986 release:
987 	hw->phy.ops.release(hw);
988 
989 	return ret_val;
990 }
991 
992 /**
993  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
994  *  @hw:   pointer to the HW structure
995  *  @link: link up bool flag
996  *
997  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
998  *  preventing further DMA write requests.  Workaround the issue by disabling
999  *  the de-assertion of the clock request when in 1Gbps mode.
1000  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
1001  *  speeds in order to avoid Tx hangs.
1002  **/
1003 static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
1004 {
1005 	u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1006 	u32 status = E1000_READ_REG(hw, E1000_STATUS);
1007 	s32 ret_val = E1000_SUCCESS;
1008 	u16 reg;
1009 
1010 	if (link && (status & E1000_STATUS_SPEED_1000)) {
1011 		ret_val = hw->phy.ops.acquire(hw);
1012 		if (ret_val)
1013 			return ret_val;
1014 
1015 		ret_val =
1016 		    e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1017 					       &reg);
1018 		if (ret_val)
1019 			goto release;
1020 
1021 		ret_val =
1022 		    e1000_write_kmrn_reg_locked(hw,
1023 						E1000_KMRNCTRLSTA_K1_CONFIG,
1024 						reg &
1025 						~E1000_KMRNCTRLSTA_K1_ENABLE);
1026 		if (ret_val)
1027 			goto release;
1028 
1029 		usec_delay(10);
1030 
1031 		E1000_WRITE_REG(hw, E1000_FEXTNVM6,
1032 				fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
1033 
1034 		ret_val =
1035 		    e1000_write_kmrn_reg_locked(hw,
1036 						E1000_KMRNCTRLSTA_K1_CONFIG,
1037 						reg);
1038 release:
1039 		hw->phy.ops.release(hw);
1040 	} else {
1041 		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
1042 		fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
1043 
1044 		if (!link || ((status & E1000_STATUS_SPEED_100) &&
1045 			      (status & E1000_STATUS_FD)))
1046 			goto update_fextnvm6;
1047 
1048 		ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
1049 		if (ret_val)
1050 			return ret_val;
1051 
1052 		/* Clear link status transmit timeout */
1053 		reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
1054 
1055 		if (status & E1000_STATUS_SPEED_100) {
1056 			/* Set inband Tx timeout to 5x10us for 100Half */
1057 			reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1058 
1059 			/* Do not extend the K1 entry latency for 100Half */
1060 			fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1061 		} else {
1062 			/* Set inband Tx timeout to 50x10us for 10Full/Half */
1063 			reg |= 50 <<
1064 			       I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1065 
1066 			/* Extend the K1 entry latency for 10 Mbps */
1067 			fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1068 		}
1069 
1070 		ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
1071 		if (ret_val)
1072 			return ret_val;
1073 
1074 update_fextnvm6:
1075 		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1076 	}
1077 
1078 	return ret_val;
1079 }
1080 
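/**
 *  e1000_ltr2ns - convert an encoded LTR value to nanoseconds
 *  @ltr: Latency Tolerance Reporting value (10-bit value plus 3-bit scale)
 *
 *  Decodes per the PCIe LTR encoding: latency = value * 2^(5 * scale).
 *  For example, scale 2 with value 40 decodes to 40 * 2^10 = 40960 ns.
 **/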
1081 static u64 e1000_ltr2ns(u16 ltr)
1082 {
1083 	u32 value, scale;
1084 
1085 	/* Determine the latency in nsec based on the LTR value & scale */
1086 	value = ltr & E1000_LTRV_VALUE_MASK;
1087 	scale = (ltr & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT;
1088 
1089 	return value * (1ULL << (scale * E1000_LTRV_SCALE_FACTOR));
1090 }
1091 
1092 /**
1093  *  e1000_platform_pm_pch_lpt - Set platform power management values
1094  *  @hw: pointer to the HW structure
1095  *  @link: bool indicating link status
1096  *
1097  *  Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
1098  *  GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
1099  *  when link is up (which must not exceed the maximum latency supported
1100  *  by the platform), otherwise specify there is no LTR requirement.
1101  *  Unlike TRUE-PCIe devices which set the LTR maximum snoop/no-snoop
1102  *  latencies in the LTR Extended Capability Structure in the PCIe Extended
1103  *  Capability register set, on this device LTR is set by writing the
1104  *  equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
1105  *  set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
1106  *  message to the PMC.
1107  *
1108  *  Use the LTR value to calculate the Optimized Buffer Flush/Fill (OBFF)
1109  *  high-water mark.
1110  **/
1111 static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
1112 {
1113 	u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
1114 		  link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
1115 	u16 lat_enc = 0;	/* latency encoded */
1116 	s32 obff_hwm = 0;
1117 
1118 	DEBUGFUNC("e1000_platform_pm_pch_lpt");
1119 
1120 	if (link) {
1121 		u16 speed, duplex, scale = 0;
1122 		u16 max_snoop, max_nosnoop;
1123 		u16 max_ltr_enc;	/* max LTR latency encoded */
1124 		s64 lat_ns;		/* latency (ns) */
1125 		s64 value;
1126 		u32 rxa;
1127 
1128 		if (!hw->mac.max_frame_size) {
1129 			DEBUGOUT("max_frame_size not set.\n");
1130 			return -E1000_ERR_CONFIG;
1131 		}
1132 
1133 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
1134 		if (!speed) {
1135 			DEBUGOUT("Speed not set.\n");
1136 			return -E1000_ERR_CONFIG;
1137 		}
1138 
1139 		/* Rx Packet Buffer Allocation size (KB) */
1140 		rxa = E1000_READ_REG(hw, E1000_PBA) & E1000_PBA_RXA_MASK;
1141 
1142 		/* Determine the maximum latency tolerated by the device.
1143 		 *
1144 		 * Per the PCIe spec, the tolerated latencies are encoded as
1145 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
1146 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
1147 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
1148 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
1149 		 */
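		/* Worked example (illustrative numbers): rxa = 9 KB and a
		 * 2 KB max frame at 1000 Mbps give
		 * lat_ns = (9 * 1024 - 2 * 2048) * 8 * 1000 / 1000 = 40960.
		 * That exceeds the 10-bit value field, so the loop below
		 * divides by 2^5 twice: scale = 2, value = 40, and
		 * lat_enc = (2 << E1000_LTRV_SCALE_SHIFT) | 40.
		 */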
1150 		lat_ns = ((s64)rxa * 1024 -
1151 			  (2 * (s64)hw->mac.max_frame_size)) * 8 * 1000;
1152 		if (lat_ns < 0)
1153 			lat_ns = 0;
1154 		else
1155 			lat_ns /= speed;
1156 
1157 		value = lat_ns;
1158 		while (value > E1000_LTRV_VALUE_MASK) {
1159 			scale++;
1160 			value = E1000_DIVIDE_ROUND_UP(value, (1 << 5));
1161 		}
1162 		if (scale > E1000_LTRV_SCALE_MAX) {
1163 			DEBUGOUT1("Invalid LTR latency scale %d\n", scale);
1164 			return -E1000_ERR_CONFIG;
1165 		}
1166 		lat_enc = (u16)((scale << E1000_LTRV_SCALE_SHIFT) | value);
1167 
1168 		/* Determine the maximum latency tolerated by the platform */
1169 		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT, &max_snoop);
1170 		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
1171 		max_ltr_enc = E1000_MAX(max_snoop, max_nosnoop);
1172 
1173 		if (lat_enc > max_ltr_enc) {
1174 			lat_enc = max_ltr_enc;
1175 			lat_ns = e1000_ltr2ns(max_ltr_enc);
1176 		}
1177 
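		/* Reverse conversion: ns * speed (Mb/s) * 1000 / 8 / 10^9
		 * yields the KB of Rx buffer that fill during lat_ns at line
		 * rate, so the OBFF high-water mark is whatever remains of
		 * rxa (in KB).
		 */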
1178 		if (lat_ns) {
1179 			lat_ns *= speed * 1000;
1180 			lat_ns /= 8;
1181 			lat_ns /= 1000000000;
1182 			obff_hwm = (s32)(rxa - lat_ns);
1183 		}
1184 		if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) {
1185 			DEBUGOUT1("Invalid high water mark %d\n", obff_hwm);
1186 			return -E1000_ERR_CONFIG;
1187 		}
1188 	}
1189 
1190 	/* Set Snoop and No-Snoop latencies the same */
1191 	reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
1192 	E1000_WRITE_REG(hw, E1000_LTRV, reg);
1193 
1194 	/* Set OBFF high water mark */
1195 	reg = E1000_READ_REG(hw, E1000_SVT) & ~E1000_SVT_OFF_HWM_MASK;
1196 	reg |= obff_hwm;
1197 	E1000_WRITE_REG(hw, E1000_SVT, reg);
1198 
1199 	/* Enable OBFF */
1200 	reg = E1000_READ_REG(hw, E1000_SVCR);
1201 	reg |= E1000_SVCR_OFF_EN;
1202 	/* Always unblock interrupts to the CPU even when the system is
1203 	 * in OBFF mode. This ensures that small round-robin traffic
1204 	 * (like ping) does not get dropped or experience long latency.
1205 	 */
1206 	reg |= E1000_SVCR_OFF_MASKINT;
1207 	E1000_WRITE_REG(hw, E1000_SVCR, reg);
1208 
1209 	return E1000_SUCCESS;
1210 }
1211 
1212 /**
1213  *  e1000_set_obff_timer_pch_lpt - Update Optimized Buffer Flush/Fill timer
1214  *  @hw: pointer to the HW structure
1215  *  @itr: interrupt throttling rate
1216  *
1217  *  Configure OBFF with the updated interrupt rate.
1218  **/
1219 static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr)
1220 {
1221 	u32 svcr;
1222 	s32 timer;
1223 
1224 	DEBUGFUNC("e1000_set_obff_timer_pch_lpt");
1225 
1226 	/* Convert ITR value into microseconds for OBFF timer */
1227 	timer = itr & E1000_ITR_MASK;
1228 	timer = (timer * E1000_ITR_MULT) / 1000;
1229 
1230 	if ((timer < 0) || (timer > E1000_ITR_MASK)) {
1231 		DEBUGOUT1("Invalid OBFF timer %d\n", timer);
1232 		return -E1000_ERR_CONFIG;
1233 	}
1234 
1235 	svcr = E1000_READ_REG(hw, E1000_SVCR);
1236 	svcr &= ~E1000_SVCR_OFF_TIMER_MASK;
1237 	svcr |= timer << E1000_SVCR_OFF_TIMER_SHIFT;
1238 	E1000_WRITE_REG(hw, E1000_SVCR, svcr);
1239 
1240 	return E1000_SUCCESS;
1241 }
1242 
1243 /**
1244  *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1245  *  @hw: pointer to the HW structure
1246  *  @to_sx: boolean indicating a system power state transition to Sx
1247  *
1248  *  When link is down, configure ULP mode to significantly reduce the power
1249  *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
1250  *  ME firmware to start the ULP configuration.  If not on an ME enabled
1251  *  system, configure the ULP mode by software.
1252  */
1253 s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1254 {
1255 	u32 mac_reg;
1256 	s32 ret_val = E1000_SUCCESS;
1257 	u16 phy_reg;
1258 
1259 	if ((hw->mac.type < e1000_pch_lpt) ||
1260 	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1261 	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1262 	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1263 	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1264 	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1265 		return 0;
1266 
1267 	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1268 		/* Request ME configure ULP mode in the PHY */
1269 		mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1270 		mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1271 		E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1272 
1273 		goto out;
1274 	}
1275 
1276 	if (!to_sx) {
1277 		int i = 0;
1278 
1279 		/* Poll up to 5 seconds for Cable Disconnected indication */
1280 		while (!(E1000_READ_REG(hw, E1000_FEXT) &
1281 			 E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1282 			/* Bail if link is re-acquired */
1283 			if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
1284 				return -E1000_ERR_PHY;
1285 
1286 			if (i++ == 100)
1287 				break;
1288 
1289 			msec_delay(50);
1290 		}
1291 		DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
1292 			 (E1000_READ_REG(hw, E1000_FEXT) &
1293 			  E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
1294 			 i * 50);
1295 	}
1296 
1297 	ret_val = hw->phy.ops.acquire(hw);
1298 	if (ret_val)
1299 		goto out;
1300 
1301 	/* Force SMBus mode in PHY */
1302 	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1303 	if (ret_val)
1304 		goto release;
1305 	phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1306 	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1307 
1308 	/* Force SMBus mode in MAC */
1309 	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1310 	mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1311 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1312 
1313 	/* Set Inband ULP Exit, Reset to SMBus mode and
1314 	 * Disable SMBus Release on PERST# in PHY
1315 	 */
1316 	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1317 	if (ret_val)
1318 		goto release;
1319 	phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1320 		    I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1321 	if (to_sx) {
1322 		if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
1323 			phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1324 
1325 		phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1326 	} else {
1327 		phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1328 	}
1329 	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1330 
1331 	/* Set Disable SMBus Release on PERST# in MAC */
1332 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1333 	mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1334 	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1335 
1336 	/* Commit ULP changes in PHY by starting auto ULP configuration */
1337 	phy_reg |= I218_ULP_CONFIG1_START;
1338 	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1339 release:
1340 	hw->phy.ops.release(hw);
1341 out:
1342 	if (ret_val)
1343 		DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
1344 	else
1345 		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1346 
1347 	return ret_val;
1348 }
1349 
1350 /**
1351  *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1352  *  @hw: pointer to the HW structure
1353  *  @force: boolean indicating whether or not to force disabling ULP
1354  *
1355  *  Un-configure ULP mode when link is up, the system is transitioned from
1356  *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1357  *  system, poll for an indication from ME that ULP has been un-configured.
1358  *  If not on an ME enabled system, un-configure the ULP mode by software.
1359  *
1360  *  During nominal operation, this function is called when link is acquired
1361  *  to disable ULP mode (force=FALSE); otherwise, for example when unloading
1362  *  the driver or during Sx->S0 transitions, this is called with force=TRUE
1363  *  to forcibly disable ULP.
1364  */
1365 s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1366 {
1367 	s32 ret_val = E1000_SUCCESS;
1368 	u32 mac_reg;
1369 	u16 phy_reg;
1370 	int i = 0;
1371 
1372 	if ((hw->mac.type < e1000_pch_lpt) ||
1373 	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1374 	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1375 	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1376 	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1377 	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1378 		return 0;
1379 
1380 	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1381 		if (force) {
1382 			/* Request ME un-configure ULP mode in the PHY */
1383 			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1384 			mac_reg &= ~E1000_H2ME_ULP;
1385 			mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1386 			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1387 		}
1388 
1389 		/* Poll up to 100msec for ME to clear ULP_CFG_DONE */
1390 		while (E1000_READ_REG(hw, E1000_FWSM) &
1391 		       E1000_FWSM_ULP_CFG_DONE) {
1392 			if (i++ == 10) {
1393 				ret_val = -E1000_ERR_PHY;
1394 				goto out;
1395 			}
1396 
1397 			msec_delay(10);
1398 		}
1399 		DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1400 
1401 		if (force) {
1402 			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1403 			mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1404 			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1405 		} else {
1406 			/* Clear H2ME.ULP after ME ULP configuration */
1407 			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1408 			mac_reg &= ~E1000_H2ME_ULP;
1409 			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1410 		}
1411 
1412 		goto out;
1413 	}
1414 
1415 	ret_val = hw->phy.ops.acquire(hw);
1416 	if (ret_val)
1417 		goto out;
1418 
1419 	if (force)
1420 		/* Toggle LANPHYPC Value bit */
1421 		e1000_toggle_lanphypc_pch_lpt(hw);
1422 
1423 	/* Unforce SMBus mode in PHY */
1424 	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1425 	if (ret_val) {
1426 		/* The MAC might be in PCIe mode, so temporarily force to
1427 		 * SMBus mode in order to access the PHY.
1428 		 */
1429 		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1430 		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1431 		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1432 
1433 		msec_delay(50);
1434 
1435 		ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1436 						       &phy_reg);
1437 		if (ret_val)
1438 			goto release;
1439 	}
1440 	phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1441 	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1442 
1443 	/* Unforce SMBus mode in MAC */
1444 	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1445 	mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1446 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1447 
1448 	/* When ULP mode was previously entered, K1 was disabled by the
1449 	 * hardware.  Re-Enable K1 in the PHY when exiting ULP.
1450 	 */
1451 	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1452 	if (ret_val)
1453 		goto release;
1454 	phy_reg |= HV_PM_CTRL_K1_ENABLE;
1455 	e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1456 
1457 	/* Clear ULP enabled configuration */
1458 	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1459 	if (ret_val)
1460 		goto release;
1461 	phy_reg &= ~(I218_ULP_CONFIG1_IND |
1462 		     I218_ULP_CONFIG1_STICKY_ULP |
1463 		     I218_ULP_CONFIG1_RESET_TO_SMBUS |
1464 		     I218_ULP_CONFIG1_WOL_HOST |
1465 		     I218_ULP_CONFIG1_INBAND_EXIT |
1466 		     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1467 	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1468 
1469 	/* Commit ULP changes by starting auto ULP configuration */
1470 	phy_reg |= I218_ULP_CONFIG1_START;
1471 	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1472 
1473 	/* Clear Disable SMBus Release on PERST# in MAC */
1474 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1475 	mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1476 	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1477 
1478 release:
1479 	hw->phy.ops.release(hw);
1480 	if (force) {
1481 		hw->phy.ops.reset(hw);
1482 		msec_delay(50);
1483 	}
1484 out:
1485 	if (ret_val)
1486 		DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
1487 	else
1488 		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1489 
1490 	return ret_val;
1491 }
1492 
1493 /**
1494  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1495  *  @hw: pointer to the HW structure
1496  *
1497  *  Checks to see of the link status of the hardware has changed.  If a
1498  *  change in link status has been detected, then we read the PHY registers
1499  *  to get the current speed/duplex if link exists.
1500  **/
1501 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1502 {
1503 	struct e1000_mac_info *mac = &hw->mac;
1504 	s32 ret_val;
1505 	bool link;
1506 	u16 phy_reg;
1507 
1508 	DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1509 
1510 	/* We only want to go out to the PHY registers to see if Auto-Neg
1511 	 * has completed and/or if our link status has changed.  The
1512 	 * get_link_status flag is set upon receiving a Link Status
1513 	 * Change or Rx Sequence Error interrupt.
1514 	 */
1515 	if (!mac->get_link_status)
1516 		return E1000_SUCCESS;
1517 
1518 	/* First we want to see if the MII Status Register reports
1519 	 * link.  If so, then we want to get the current speed/duplex
1520 	 * of the PHY.
1521 	 */
1522 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1523 	if (ret_val)
1524 		return ret_val;
1525 
1526 	if (hw->mac.type == e1000_pchlan) {
1527 		ret_val = e1000_k1_gig_workaround_hv(hw, link);
1528 		if (ret_val)
1529 			return ret_val;
1530 	}
1531 
1532 	/* When connected at 10Mbps half-duplex, some parts are excessively
1533 	 * aggressive resulting in many collisions. To avoid this, increase
1534 	 * the IPG and reduce Rx latency in the PHY.
1535 	 */
1536 	if (((hw->mac.type == e1000_pch2lan) ||
1537 	     (hw->mac.type == e1000_pch_lpt) ||
1538 	     (hw->mac.type == e1000_pch_spt)) && link) {
1539 		u32 reg;
1540 		reg = E1000_READ_REG(hw, E1000_STATUS);
1541 		if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
1542 			u16 emi_addr;
1543 
1544 			reg = E1000_READ_REG(hw, E1000_TIPG);
1545 			reg &= ~E1000_TIPG_IPGT_MASK;
1546 			reg |= 0xFF;
1547 			E1000_WRITE_REG(hw, E1000_TIPG, reg);
1548 
1549 			/* Reduce Rx latency in analog PHY */
1550 			ret_val = hw->phy.ops.acquire(hw);
1551 			if (ret_val)
1552 				return ret_val;
1553 
1554 			if (hw->mac.type == e1000_pch2lan)
1555 				emi_addr = I82579_RX_CONFIG;
1556 			else
1557 				emi_addr = I217_RX_CONFIG;
1558 			ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0);
1559 
1560 			hw->phy.ops.release(hw);
1561 
1562 			if (ret_val)
1563 				return ret_val;
1564 		} else if (hw->mac.type == e1000_pch_spt &&
1565 			   (reg & E1000_STATUS_FD) &&
1566 			   (reg & E1000_STATUS_SPEED_MASK) == E1000_STATUS_SPEED_1000) {
			reg = E1000_READ_REG(hw, E1000_TIPG);
1567 			reg &= ~E1000_TIPG_IPGT_MASK;
1568 			reg |= 0x0C;
1569 			E1000_WRITE_REG(hw, E1000_TIPG, reg);
1570 
1571 			ret_val = hw->phy.ops.acquire(hw);
1572 			if (ret_val)
1573 				return ret_val;
1574 
1575 			ret_val = e1000_write_emi_reg_locked(hw, I217_RX_CONFIG, 1);
1576 
1577 			hw->phy.ops.release(hw);
1578 
1579 			if (ret_val)
1580 				return ret_val;
1581 		}
1582 
1583 		/* Ensure the PHY transmit pointer gap is at least 0x18
1584 		 * when linked at 1Gbps full-duplex on SPT parts.
1585 		 */
1586 		reg = E1000_READ_REG(hw, E1000_STATUS);
1587 		if (hw->mac.type == e1000_pch_spt &&
1588 		   (reg & E1000_STATUS_FD) &&
1589 		   (reg & E1000_STATUS_SPEED_MASK) == E1000_STATUS_SPEED_1000) {
1590 			u16 data = 0;
1591 			u16 ptr_gap;
1592 
1593 			ret_val = hw->phy.ops.acquire(hw);
1594 			if (ret_val)
1595 				return ret_val;
1596 			ret_val = hw->phy.ops.read_reg_locked(hw,
1597 						PHY_REG(776, 20), &data);
1598 			ptr_gap = (data & (0x3FF << 2)) >> 2;
1599 			if (!ret_val && ptr_gap < 0x18) {
1600 				data &= ~(0x3FF << 2);
1601 				data |= (0x18 << 2);
1602 				ret_val = hw->phy.ops.write_reg_locked(hw,
1603 						PHY_REG(776, 20), data);
1604 			}
1605 			hw->phy.ops.release(hw);
1606 
1607 			if (ret_val)
1608 				return ret_val;
1609 		}
1610 	}
1611 
1612 	/* I217 Packet Loss issue:
1613 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
1614 	 * on power up.
1615 	 * Set the Beacon Duration for I217 to 8 usec
1616 	 */
1617 	if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) {
1618 		u32 mac_reg;
1619 
1620 		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1621 		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1622 		mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1623 		E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1624 	}
1625 
1626 	/* Work-around I218 hang issue */
1627 	if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1628 	    (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1629 	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1630 	    (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1631 		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1632 		if (ret_val)
1633 			return ret_val;
1634 	}
1635 
1636 	if (hw->mac.type == e1000_pch_lpt ||
1637 	    hw->mac.type == e1000_pch_spt) {
1638 		/* Set platform power management values for
1639 		 * Latency Tolerance Reporting (LTR)
1640 		 * Optimized Buffer Flush/Fill (OBFF)
1641 		 */
1642 		ret_val = e1000_platform_pm_pch_lpt(hw, link);
1643 		if (ret_val)
1644 			return ret_val;
1645 	}
1646 
1647 	/* Clear link partner's EEE ability */
1648 	hw->dev_spec.ich8lan.eee_lp_ability = 0;
1649 
1650 	/* FEXTNVM6 K1-off workaround */
1651 	if (hw->mac.type == e1000_pch_spt) {
1652 		u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);
1653 		u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1654 
1655 		if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
1656 			fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
1657 		else
1658 			fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1659 		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1660 	}
1661 
1662 	if (!link)
1663 		return E1000_SUCCESS; /* No link detected */
1664 
1665 	mac->get_link_status = FALSE;
1666 
1667 	switch (hw->mac.type) {
1668 	case e1000_pch2lan:
1669 		ret_val = e1000_k1_workaround_lv(hw);
1670 		if (ret_val)
1671 			return ret_val;
1672 		/* fall-thru */
1673 	case e1000_pchlan:
1674 		if (hw->phy.type == e1000_phy_82578) {
1675 			ret_val = e1000_link_stall_workaround_hv(hw);
1676 			if (ret_val)
1677 				return ret_val;
1678 		}
1679 
1680 		/* Workaround for PCHx parts in half-duplex:
1681 		 * Set the number of preambles removed from the packet
1682 		 * when it is passed from the PHY to the MAC to prevent
1683 		 * the MAC from misinterpreting the packet type.
1684 		 */
1685 		hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1686 		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1687 
1688 		if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1689 		    E1000_STATUS_FD)
1690 			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1691 
1692 		hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1693 		break;
1694 	default:
1695 		break;
1696 	}
1697 
1698 	/* Check if there was DownShift, must be checked
1699 	 * immediately after link-up
1700 	 */
1701 	e1000_check_downshift_generic(hw);
1702 
1703 	/* Enable/Disable EEE after link up */
1704 	if (hw->phy.type > e1000_phy_82579) {
1705 		ret_val = e1000_set_eee_pchlan(hw);
1706 		if (ret_val)
1707 			return ret_val;
1708 	}
1709 
1710 	/* If we are forcing speed/duplex, then we simply return since
1711 	 * we have already determined whether we have link or not.
1712 	 */
1713 	if (!mac->autoneg)
1714 		return -E1000_ERR_CONFIG;
1715 
1716 	/* Auto-Neg is enabled.  Auto Speed Detection takes care
1717 	 * of MAC speed/duplex configuration.  So we only need to
1718 	 * configure Collision Distance in the MAC.
1719 	 */
1720 	mac->ops.config_collision_dist(hw);
1721 
1722 	/* Configure Flow Control now that Auto-Neg has completed.
1723 	 * First, we need to restore the desired flow control
1724 	 * settings because we may have had to re-autoneg with a
1725 	 * different link partner.
1726 	 */
1727 	ret_val = e1000_config_fc_after_link_up_generic(hw);
1728 	if (ret_val)
1729 		DEBUGOUT("Error configuring flow control\n");
1730 
1731 	return ret_val;
1732 }
1733 
1734 /**
1735  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1736  *  @hw: pointer to the HW structure
1737  *
1738  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1739  **/
1740 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1741 {
1742 	DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1743 
1744 	hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1745 	hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1746 	switch (hw->mac.type) {
1747 	case e1000_ich8lan:
1748 	case e1000_ich9lan:
1749 	case e1000_ich10lan:
1750 		hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1751 		break;
1752 	case e1000_pchlan:
1753 	case e1000_pch2lan:
1754 	case e1000_pch_lpt:
1755 	case e1000_pch_spt:
1756 		hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1757 		break;
1758 	default:
1759 		break;
1760 	}
1761 }
1762 
1763 /**
1764  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1765  *  @hw: pointer to the HW structure
1766  *
1767  *  Acquires the mutex for performing NVM operations.
1768  **/
1769 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1770 {
1771 	DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1772 	return E1000_SUCCESS;
1773 }
1774 
1775 /**
1776  *  e1000_release_nvm_ich8lan - Release NVM mutex
1777  *  @hw: pointer to the HW structure
1778  *
1779  *  Releases the mutex used while performing NVM operations.
1780  **/
1781 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1782 {
1783 	DEBUGFUNC("e1000_release_nvm_ich8lan");
1784 	return;
1785 }
1786 
1787 /**
1788  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1789  *  @hw: pointer to the HW structure
1790  *
1791  *  Acquires the software control flag for performing PHY and select
1792  *  MAC CSR accesses.
1793  **/
1794 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1795 {
1796 	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1797 	s32 ret_val = E1000_SUCCESS;
1798 
1799 	DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1800 
1801 	while (timeout) {
1802 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1803 		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1804 			break;
1805 
1806 		msec_delay_irq(1);
1807 		timeout--;
1808 	}
1809 
1810 	if (!timeout) {
1811 		DEBUGOUT("SW has already locked the resource.\n");
1812 		ret_val = -E1000_ERR_CONFIG;
1813 		goto out;
1814 	}
1815 
1816 	timeout = SW_FLAG_TIMEOUT;
1817 
1818 	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1819 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1820 
1821 	while (timeout) {
1822 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1823 		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1824 			break;
1825 
1826 		msec_delay_irq(1);
1827 		timeout--;
1828 	}
1829 
1830 	if (!timeout) {
1831 		DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x\n",
1832 			  E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1833 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1834 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1835 		ret_val = -E1000_ERR_CONFIG;
1836 		goto out;
1837 	}
1838 
1839 out:
1840 	return ret_val;
1841 }
1842 
1843 /**
1844  *  e1000_release_swflag_ich8lan - Release software control flag
1845  *  @hw: pointer to the HW structure
1846  *
1847  *  Releases the software control flag for performing PHY and select
1848  *  MAC CSR accesses.
1849  **/
1850 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1851 {
1852 	u32 extcnf_ctrl;
1853 
1854 	DEBUGFUNC("e1000_release_swflag_ich8lan");
1855 
1856 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1857 
1858 	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1859 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1860 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1861 	} else {
1862 		DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1863 	}
1864 	return;
1865 }
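
/* Usage sketch (illustrative only, not part of the original source): the
 * software control flag brackets PHY and select MAC CSR accesses, e.g.
 *
 *	if (e1000_acquire_swflag_ich8lan(hw) == E1000_SUCCESS) {
 *		... access PHY or shared MAC CSRs ...
 *		e1000_release_swflag_ich8lan(hw);
 *	}
 */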
1866 
1867 /**
1868  *  e1000_check_mng_mode_ich8lan - Checks management mode
1869  *  @hw: pointer to the HW structure
1870  *
1871  *  This checks if the adapter has any manageability enabled.
1872  *  This is a function pointer entry point only called by read/write
1873  *  routines for the PHY and NVM parts.
1874  **/
1875 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1876 {
1877 	u32 fwsm;
1878 
1879 	DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1880 
1881 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
1882 
1883 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1884 	       ((fwsm & E1000_FWSM_MODE_MASK) ==
1885 		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1886 }
1887 
1888 /**
1889  *  e1000_check_mng_mode_pchlan - Checks management mode
1890  *  @hw: pointer to the HW structure
1891  *
1892  *  This checks if the adapter has iAMT enabled.
1893  *  This is a function pointer entry point only called by read/write
1894  *  routines for the PHY and NVM parts.
1895  **/
1896 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1897 {
1898 	u32 fwsm;
1899 
1900 	DEBUGFUNC("e1000_check_mng_mode_pchlan");
1901 
1902 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
1903 
1904 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1905 	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1906 }
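
/* Note: the ICH8 variant requires the FWSM mode field to equal
 * E1000_ICH_MNG_IAMT_MODE exactly, while the PCH variant above only
 * tests that the iAMT mode bit is set somewhere within that field.
 */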
1907 
1908 /**
1909  *  e1000_rar_set_pch2lan - Set receive address register
1910  *  @hw: pointer to the HW structure
1911  *  @addr: pointer to the receive address
1912  *  @index: receive address array register
1913  *
1914  *  Sets the receive address array register at index to the address passed
1915  *  in by addr.  For 82579, RAR[0] is the base address register that is to
1916  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1917  *  Use SHRA[0-3] in place of those reserved for ME.
1918  **/
1919 static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1920 {
1921 	u32 rar_low, rar_high;
1922 
1923 	DEBUGFUNC("e1000_rar_set_pch2lan");
1924 
1925 	/* HW expects these in little endian so we reverse the byte order
1926 	 * from network order (big endian) to little endian
1927 	 */
1928 	rar_low = ((u32) addr[0] |
1929 		   ((u32) addr[1] << 8) |
1930 		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1931 
1932 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1933 
1934 	/* If MAC address zero, no need to set the AV bit */
1935 	if (rar_low || rar_high)
1936 		rar_high |= E1000_RAH_AV;
1937 
1938 	if (index == 0) {
1939 		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1940 		E1000_WRITE_FLUSH(hw);
1941 		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1942 		E1000_WRITE_FLUSH(hw);
1943 		return E1000_SUCCESS;
1944 	}
1945 
1946 	/* RAR[1-6] are owned by manageability.  Skip those and program the
1947 	 * next address into the SHRA register array.
1948 	 */
1949 	if (index < (u32) (hw->mac.rar_entry_count)) {
1950 		s32 ret_val;
1951 
1952 		ret_val = e1000_acquire_swflag_ich8lan(hw);
1953 		if (ret_val)
1954 			goto out;
1955 
1956 		E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
1957 		E1000_WRITE_FLUSH(hw);
1958 		E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
1959 		E1000_WRITE_FLUSH(hw);
1960 
1961 		e1000_release_swflag_ich8lan(hw);
1962 
1963 		/* verify the register updates */
1964 		if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
1965 		    (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
1966 			return E1000_SUCCESS;
1967 
1968 		DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1969 			 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
1970 	}
1971 
1972 out:
1973 	DEBUGOUT1("Failed to write receive address at index %d\n", index);
1974 	return -E1000_ERR_CONFIG;
1975 }
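
/* Illustrative example (not part of the original source): for the address
 * 00:1b:21:3c:4d:5e the packing above yields rar_low = 0x3c211b00 and
 * rar_high = 0x00005e4d, with E1000_RAH_AV OR'ed in since the address is
 * non-zero.
 */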
1976 
1977 /**
1978  *  e1000_rar_set_pch_lpt - Set receive address registers
1979  *  @hw: pointer to the HW structure
1980  *  @addr: pointer to the receive address
1981  *  @index: receive address array register
1982  *
1983  *  Sets the receive address register array at index to the address passed
1984  *  in by addr. For LPT, RAR[0] is the base address register that is to
1985  *  contain the MAC address. SHRA[0-10] are the shared receive address
1986  *  registers that are shared between the Host and manageability engine (ME).
1987  **/
1988 static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1989 {
1990 	u32 rar_low, rar_high;
1991 	u32 wlock_mac;
1992 
1993 	DEBUGFUNC("e1000_rar_set_pch_lpt");
1994 
1995 	/* HW expects these in little endian so we reverse the byte order
1996 	 * from network order (big endian) to little endian
1997 	 */
1998 	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
1999 		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2000 
2001 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2002 
2003 	/* If MAC address zero, no need to set the AV bit */
2004 	if (rar_low || rar_high)
2005 		rar_high |= E1000_RAH_AV;
2006 
2007 	if (index == 0) {
2008 		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2009 		E1000_WRITE_FLUSH(hw);
2010 		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2011 		E1000_WRITE_FLUSH(hw);
2012 		return E1000_SUCCESS;
2013 	}
2014 
2015 	/* The manageability engine (ME) can lock certain SHRAR registers that
2016 	 * it is using - those registers are unavailable for use.
2017 	 */
2018 	if (index < hw->mac.rar_entry_count) {
2019 		wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
2020 			    E1000_FWSM_WLOCK_MAC_MASK;
2021 		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
2022 
2023 		/* Check if all SHRAR registers are locked */
2024 		if (wlock_mac == 1)
2025 			goto out;
2026 
2027 		if ((wlock_mac == 0) || (index <= wlock_mac)) {
2028 			s32 ret_val;
2029 
2030 			ret_val = e1000_acquire_swflag_ich8lan(hw);
2031 
2032 			if (ret_val)
2033 				goto out;
2034 
2035 			E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
2036 					rar_low);
2037 			E1000_WRITE_FLUSH(hw);
2038 			E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
2039 					rar_high);
2040 			E1000_WRITE_FLUSH(hw);
2041 
2042 			e1000_release_swflag_ich8lan(hw);
2043 
2044 			/* verify the register updates */
2045 			if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
2046 			    (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
2047 				return E1000_SUCCESS;
2048 		}
2049 	}
2050 
2051 out:
2052 	DEBUGOUT1("Failed to write receive address at index %d\n", index);
2053 	return -E1000_ERR_CONFIG;
2054 }
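
/* Illustrative decode of the FWSM WLOCK_MAC field used above: 0 leaves
 * every SHRA register writable, 1 locks them all, and a value N >= 2
 * permits writes to SHRA[0..N-1] only (i.e. RAR index <= N).
 */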
2055 
2056 /**
2057  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
2058  *  @hw: pointer to the HW structure
2059  *  @mc_addr_list: array of multicast addresses to program
2060  *  @mc_addr_count: number of multicast addresses to program
2061  *
2062  *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
2063  *  The caller must have a packed mc_addr_list of multicast addresses.
2064  **/
2065 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
2066 					      u8 *mc_addr_list,
2067 					      u32 mc_addr_count)
2068 {
2069 	u16 phy_reg = 0;
2070 	int i;
2071 	s32 ret_val;
2072 
2073 	DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
2074 
2075 	e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
2076 
2077 	ret_val = hw->phy.ops.acquire(hw);
2078 	if (ret_val)
2079 		return;
2080 
2081 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2082 	if (ret_val)
2083 		goto release;
2084 
2085 	for (i = 0; i < hw->mac.mta_reg_count; i++) {
2086 		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
2087 					   (u16)(hw->mac.mta_shadow[i] &
2088 						 0xFFFF));
2089 		hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
2090 					   (u16)((hw->mac.mta_shadow[i] >> 16) &
2091 						 0xFFFF));
2092 	}
2093 
2094 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2095 
2096 release:
2097 	hw->phy.ops.release(hw);
2098 }
2099 
2100 /**
2101  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
2102  *  @hw: pointer to the HW structure
2103  *
2104  *  Checks if firmware is blocking the reset of the PHY.
2105  *  This is a function pointer entry point only called by
2106  *  reset routines.
2107  **/
2108 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2109 {
2110 	u32 fwsm;
2111 	bool blocked = FALSE;
2112 	int i = 0;
2113 
2114 	DEBUGFUNC("e1000_check_reset_block_ich8lan");
2115 
2116 	do {
2117 		fwsm = E1000_READ_REG(hw, E1000_FWSM);
2118 		if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
2119 			blocked = TRUE;
2120 			msec_delay(10);
2121 			continue;
2122 		}
2123 		blocked = FALSE;
2124 	} while (blocked && (i++ < 10));
2125 	return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
2126 }
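
/* The loop above re-reads FWSM up to ~10 times with a 10 msec delay
 * between reads, so a firmware-blocked PHY reset is only reported after
 * roughly 100 msec of polling.
 */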
2127 
2128 /**
2129  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2130  *  @hw: pointer to the HW structure
2131  *
2132  *  Assumes semaphore already acquired.
2133  *
2134  **/
2135 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2136 {
2137 	u16 phy_data;
2138 	u32 strap = E1000_READ_REG(hw, E1000_STRAP);
2139 	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2140 		E1000_STRAP_SMT_FREQ_SHIFT;
2141 	s32 ret_val;
2142 
2143 	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2144 
2145 	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2146 	if (ret_val)
2147 		return ret_val;
2148 
2149 	phy_data &= ~HV_SMB_ADDR_MASK;
2150 	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2151 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2152 
2153 	if (hw->phy.type == e1000_phy_i217) {
2154 		/* Restore SMBus frequency */
2155 		if (freq--) {
2156 			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2157 			phy_data |= (freq & (1 << 0)) <<
2158 				HV_SMB_ADDR_FREQ_LOW_SHIFT;
2159 			phy_data |= (freq & (1 << 1)) <<
2160 				(HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2161 		} else {
2162 			DEBUGOUT("Unsupported SMB frequency in PHY\n");
2163 		}
2164 	}
2165 
2166 	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2167 }
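
/* Note on the "if (freq--)" test above: a strap-encoded frequency of 0 is
 * unsupported; any non-zero value is post-decremented so that its bits 0
 * and 1 can be copied into the HV_SMB_ADDR low/high frequency bit
 * positions.
 */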
2168 
2169 /**
2170  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2171  *  @hw:   pointer to the HW structure
2172  *
2173  *  SW should configure the LCD from the NVM extended configuration region
2174  *  as a workaround for certain parts.
2175  **/
2176 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2177 {
2178 	struct e1000_phy_info *phy = &hw->phy;
2179 	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2180 	s32 ret_val = E1000_SUCCESS;
2181 	u16 word_addr, reg_data, reg_addr, phy_page = 0;
2182 
2183 	DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2184 
2185 	/* Initialize the PHY from the NVM on ICH platforms.  This
2186 	 * is needed due to an issue where the NVM configuration is
2187 	 * not properly autoloaded after power transitions.
2188 	 * Therefore, after each PHY reset, we will load the
2189 	 * configuration data out of the NVM manually.
2190 	 */
2191 	switch (hw->mac.type) {
2192 	case e1000_ich8lan:
2193 		if (phy->type != e1000_phy_igp_3)
2194 			return ret_val;
2195 
2196 		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2197 		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2198 			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2199 			break;
2200 		}
2201 		/* Fall-thru */
2202 	case e1000_pchlan:
2203 	case e1000_pch2lan:
2204 	case e1000_pch_lpt:
2205 	case e1000_pch_spt:
2206 		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2207 		break;
2208 	default:
2209 		return ret_val;
2210 	}
2211 
2212 	ret_val = hw->phy.ops.acquire(hw);
2213 	if (ret_val)
2214 		return ret_val;
2215 
2216 	data = E1000_READ_REG(hw, E1000_FEXTNVM);
2217 	if (!(data & sw_cfg_mask))
2218 		goto release;
2219 
2220 	/* Make sure HW does not configure LCD from PHY
2221 	 * extended configuration before SW configuration
2222 	 */
2223 	data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2224 	if ((hw->mac.type < e1000_pch2lan) &&
2225 	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2226 		goto release;
2227 
2228 	cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2229 	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2230 	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2231 	if (!cnf_size)
2232 		goto release;
2233 
2234 	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2235 	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2236 
2237 	if (((hw->mac.type == e1000_pchlan) &&
2238 	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2239 	    (hw->mac.type > e1000_pchlan)) {
2240 		/* HW configures the SMBus address and LEDs when the
2241 		 * OEM and LCD Write Enable bits are set in the NVM.
2242 		 * When both NVM bits are cleared, SW will configure
2243 		 * them instead.
2244 		 */
2245 		ret_val = e1000_write_smbus_addr(hw);
2246 		if (ret_val)
2247 			goto release;
2248 
2249 		data = E1000_READ_REG(hw, E1000_LEDCTL);
2250 		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2251 							(u16)data);
2252 		if (ret_val)
2253 			goto release;
2254 	}
2255 
2256 	/* Configure LCD from extended configuration region. */
2257 
2258 	/* cnf_base_addr is in DWORD */
2259 	word_addr = (u16)(cnf_base_addr << 1);
2260 
2261 	for (i = 0; i < cnf_size; i++) {
2262 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2263 					   &reg_data);
2264 		if (ret_val)
2265 			goto release;
2266 
2267 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2268 					   1, &reg_addr);
2269 		if (ret_val)
2270 			goto release;
2271 
2272 		/* Save off the PHY page for future writes. */
2273 		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2274 			phy_page = reg_data;
2275 			continue;
2276 		}
2277 
2278 		reg_addr &= PHY_REG_MASK;
2279 		reg_addr |= phy_page;
2280 
2281 		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2282 						    reg_data);
2283 		if (ret_val)
2284 			goto release;
2285 	}
2286 
2287 release:
2288 	hw->phy.ops.release(hw);
2289 	return ret_val;
2290 }
2291 
2292 /**
2293  *  e1000_k1_gig_workaround_hv - K1 Si workaround
2294  *  @hw:   pointer to the HW structure
2295  *  @link: link up bool flag
2296  *
2297  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2298  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
2299  *  If link is down, the function will restore the default K1 setting located
2300  *  in the NVM.
2301  **/
2302 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2303 {
2304 	s32 ret_val = E1000_SUCCESS;
2305 	u16 status_reg = 0;
2306 	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2307 
2308 	DEBUGFUNC("e1000_k1_gig_workaround_hv");
2309 
2310 	if (hw->mac.type != e1000_pchlan)
2311 		return E1000_SUCCESS;
2312 
2313 	/* Wrap the whole flow with the sw flag */
2314 	ret_val = hw->phy.ops.acquire(hw);
2315 	if (ret_val)
2316 		return ret_val;
2317 
2318 	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2319 	if (link) {
2320 		if (hw->phy.type == e1000_phy_82578) {
2321 			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2322 							      &status_reg);
2323 			if (ret_val)
2324 				goto release;
2325 
2326 			status_reg &= (BM_CS_STATUS_LINK_UP |
2327 				       BM_CS_STATUS_RESOLVED |
2328 				       BM_CS_STATUS_SPEED_MASK);
2329 
2330 			if (status_reg == (BM_CS_STATUS_LINK_UP |
2331 					   BM_CS_STATUS_RESOLVED |
2332 					   BM_CS_STATUS_SPEED_1000))
2333 				k1_enable = FALSE;
2334 		}
2335 
2336 		if (hw->phy.type == e1000_phy_82577) {
2337 			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2338 							      &status_reg);
2339 			if (ret_val)
2340 				goto release;
2341 
2342 			status_reg &= (HV_M_STATUS_LINK_UP |
2343 				       HV_M_STATUS_AUTONEG_COMPLETE |
2344 				       HV_M_STATUS_SPEED_MASK);
2345 
2346 			if (status_reg == (HV_M_STATUS_LINK_UP |
2347 					   HV_M_STATUS_AUTONEG_COMPLETE |
2348 					   HV_M_STATUS_SPEED_1000))
2349 				k1_enable = FALSE;
2350 		}
2351 
2352 		/* Link stall fix for link up */
2353 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2354 						       0x0100);
2355 		if (ret_val)
2356 			goto release;
2357 
2358 	} else {
2359 		/* Link stall fix for link down */
2360 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2361 						       0x4100);
2362 		if (ret_val)
2363 			goto release;
2364 	}
2365 
2366 	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2367 
2368 release:
2369 	hw->phy.ops.release(hw);
2370 
2371 	return ret_val;
2372 }
2373 
2374 /**
2375  *  e1000_configure_k1_ich8lan - Configure K1 power state
2376  *  @hw: pointer to the HW structure
2377  *  @k1_enable: K1 state to configure
2378  *
2379  *  Configure the K1 power state based on the provided parameter.
2380  *  Assumes semaphore already acquired.
2381  *
2382  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2383  **/
2384 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2385 {
2386 	s32 ret_val;
2387 	u32 ctrl_reg = 0;
2388 	u32 ctrl_ext = 0;
2389 	u32 reg = 0;
2390 	u16 kmrn_reg = 0;
2391 
2392 	DEBUGFUNC("e1000_configure_k1_ich8lan");
2393 
2394 	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2395 					     &kmrn_reg);
2396 	if (ret_val)
2397 		return ret_val;
2398 
2399 	if (k1_enable)
2400 		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2401 	else
2402 		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2403 
2404 	ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2405 					      kmrn_reg);
2406 	if (ret_val)
2407 		return ret_val;
2408 
2409 	usec_delay(20);
2410 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2411 	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2412 
2413 	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2414 	reg |= E1000_CTRL_FRCSPD;
2415 	E1000_WRITE_REG(hw, E1000_CTRL, reg);
2416 
2417 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2418 	E1000_WRITE_FLUSH(hw);
2419 	usec_delay(20);
2420 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2421 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2422 	E1000_WRITE_FLUSH(hw);
2423 	usec_delay(20);
2424 
2425 	return E1000_SUCCESS;
2426 }
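
/* Usage sketch (illustrative only); the PHY semaphore must already be
 * held, as in e1000_k1_gig_workaround_hv() above:
 *
 *	ret_val = hw->phy.ops.acquire(hw);
 *	if (!ret_val) {
 *		ret_val = e1000_configure_k1_ich8lan(hw, TRUE);
 *		hw->phy.ops.release(hw);
 *	}
 */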
2427 
2428 /**
2429  *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2430  *  @hw:       pointer to the HW structure
2431  *  @d0_state: boolean if entering d0 or d3 device state
2432  *
2433  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2434  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2435  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
2436  **/
2437 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2438 {
2439 	s32 ret_val = 0;
2440 	u32 mac_reg;
2441 	u16 oem_reg;
2442 
2443 	DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2444 
2445 	if (hw->mac.type < e1000_pchlan)
2446 		return ret_val;
2447 
2448 	ret_val = hw->phy.ops.acquire(hw);
2449 	if (ret_val)
2450 		return ret_val;
2451 
2452 	if (hw->mac.type == e1000_pchlan) {
2453 		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2454 		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2455 			goto release;
2456 	}
2457 
2458 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2459 	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2460 		goto release;
2461 
2462 	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2463 
2464 	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2465 	if (ret_val)
2466 		goto release;
2467 
2468 	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2469 
2470 	if (d0_state) {
2471 		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2472 			oem_reg |= HV_OEM_BITS_GBE_DIS;
2473 
2474 		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2475 			oem_reg |= HV_OEM_BITS_LPLU;
2476 	} else {
2477 		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2478 		    E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2479 			oem_reg |= HV_OEM_BITS_GBE_DIS;
2480 
2481 		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2482 		    E1000_PHY_CTRL_NOND0A_LPLU))
2483 			oem_reg |= HV_OEM_BITS_LPLU;
2484 	}
2485 
2486 	/* Set Restart auto-neg to activate the bits */
2487 	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2488 	    !hw->phy.ops.check_reset_block(hw))
2489 		oem_reg |= HV_OEM_BITS_RESTART_AN;
2490 
2491 	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2492 
2493 release:
2494 	hw->phy.ops.release(hw);
2495 
2496 	return ret_val;
2497 }
2498 
2500 /**
2501  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2502  *  @hw:   pointer to the HW structure
2503  **/
2504 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2505 {
2506 	s32 ret_val;
2507 	u16 data;
2508 
2509 	DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2510 
2511 	ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2512 	if (ret_val)
2513 		return ret_val;
2514 
2515 	data |= HV_KMRN_MDIO_SLOW;
2516 
2517 	ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2518 
2519 	return ret_val;
2520 }
2521 
2522 /**
2523  *  e1000_hv_phy_workarounds_ich8lan - PHY workarounds after every PHY reset
2524  *  @hw: pointer to the HW structure
2525  **/
2526 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2527 {
2528 	s32 ret_val = E1000_SUCCESS;
2529 	u16 phy_data;
2530 
2531 	DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2532 
2533 	if (hw->mac.type != e1000_pchlan)
2534 		return E1000_SUCCESS;
2535 
2536 	/* Set MDIO slow mode before any other MDIO access */
2537 	if (hw->phy.type == e1000_phy_82577) {
2538 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
2539 		if (ret_val)
2540 			return ret_val;
2541 	}
2542 
2543 	if (((hw->phy.type == e1000_phy_82577) &&
2544 	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2545 	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2546 		/* Disable generation of early preamble */
2547 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2548 		if (ret_val)
2549 			return ret_val;
2550 
2551 		/* Preamble tuning for SSC */
2552 		ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2553 						0xA204);
2554 		if (ret_val)
2555 			return ret_val;
2556 	}
2557 
2558 	if (hw->phy.type == e1000_phy_82578) {
2559 		/* Return registers to default by doing a soft reset then
2560 		 * writing 0x3140 to the control register.
2561 		 */
2562 		if (hw->phy.revision < 2) {
2563 			e1000_phy_sw_reset_generic(hw);
2564 			ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2565 							0x3140);
2566 		}
2567 	}
2568 
2569 	/* Select page 0 */
2570 	ret_val = hw->phy.ops.acquire(hw);
2571 	if (ret_val)
2572 		return ret_val;
2573 
2574 	hw->phy.addr = 1;
2575 	ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2576 	hw->phy.ops.release(hw);
2577 	if (ret_val)
2578 		return ret_val;
2579 
2580 	/* Configure the K1 Si workaround during phy reset assuming there is
2581 	 * link so that it disables K1 if link is in 1Gbps.
2582 	 */
2583 	ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
2584 	if (ret_val)
2585 		return ret_val;
2586 
2587 	/* Workaround for link disconnects on a busy hub in half duplex */
2588 	ret_val = hw->phy.ops.acquire(hw);
2589 	if (ret_val)
2590 		return ret_val;
2591 	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2592 	if (ret_val)
2593 		goto release;
2594 	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2595 					       phy_data & 0x00FF);
2596 	if (ret_val)
2597 		goto release;
2598 
2599 	/* set MSE higher to enable link to stay up when noise is high */
2600 	ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2601 release:
2602 	hw->phy.ops.release(hw);
2603 
2604 	return ret_val;
2605 }
2606 
2607 /**
2608  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2609  *  @hw:   pointer to the HW structure
2610  **/
2611 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2612 {
2613 	u32 mac_reg;
2614 	u16 i, phy_reg = 0;
2615 	s32 ret_val;
2616 
2617 	DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2618 
2619 	ret_val = hw->phy.ops.acquire(hw);
2620 	if (ret_val)
2621 		return;
2622 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2623 	if (ret_val)
2624 		goto release;
2625 
2626 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2627 	for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2628 		mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2629 		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2630 					   (u16)(mac_reg & 0xFFFF));
2631 		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2632 					   (u16)((mac_reg >> 16) & 0xFFFF));
2633 
2634 		mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2635 		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2636 					   (u16)(mac_reg & 0xFFFF));
2637 		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2638 					   (u16)((mac_reg & E1000_RAH_AV)
2639 						 >> 16));
2640 	}
2641 
2642 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2643 
2644 release:
2645 	hw->phy.ops.release(hw);
2646 }
2647 
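/**
 *  e1000_calc_rx_da_crc - Calculate CRC32 of a receive destination address
 *  @mac: pointer to the 6-byte MAC address
 *
 *  Computes the bit-reflected IEEE 802.3 CRC-32 (polynomial 0xEDB88320,
 *  initial value 0xFFFFFFFF, final complement) used by the jumbo frame
 *  workaround below to seed the E1000_PCH_RAICC registers.
 **/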
2648 static u32 e1000_calc_rx_da_crc(u8 mac[])
2649 {
2650 	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
2651 	u32 i, j, mask, crc;
2652 
2653 	DEBUGFUNC("e1000_calc_rx_da_crc");
2654 
2655 	crc = 0xffffffff;
2656 	for (i = 0; i < 6; i++) {
2657 		crc = crc ^ mac[i];
2658 		for (j = 8; j > 0; j--) {
2659 			mask = (crc & 1) * (-1);
2660 			crc = (crc >> 1) ^ (poly & mask);
2661 		}
2662 	}
2663 	return ~crc;
2664 }
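
/* Note: "(crc & 1) * (-1)" above expands to an all-ones mask whenever the
 * low bit of the CRC is set, so the polynomial is XOR'ed in only on those
 * iterations; this is the standard LSB-first (reflected) CRC-32.
 */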
2665 
2666 /**
2667  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2668  *  with 82579 PHY
2669  *  @hw: pointer to the HW structure
2670  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2671  **/
2672 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2673 {
2674 	s32 ret_val = E1000_SUCCESS;
2675 	u16 phy_reg, data;
2676 	u32 mac_reg;
2677 	u16 i;
2678 
2679 	DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2680 
2681 	if (hw->mac.type < e1000_pch2lan)
2682 		return E1000_SUCCESS;
2683 
2684 	/* disable Rx path while enabling/disabling workaround */
2685 	hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2686 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2687 					phy_reg | (1 << 14));
2688 	if (ret_val)
2689 		return ret_val;
2690 
2691 	if (enable) {
2692 		/* Write Rx addresses (rar_entry_count for RAL/H, and
2693 		 * SHRAL/H) and initial CRC values to the MAC
2694 		 */
2695 		for (i = 0; i < hw->mac.rar_entry_count; i++) {
2696 			u8 mac_addr[ETH_ADDR_LEN] = {0};
2697 			u32 addr_high, addr_low;
2698 
2699 			addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2700 			if (!(addr_high & E1000_RAH_AV))
2701 				continue;
2702 			addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2703 			mac_addr[0] = (addr_low & 0xFF);
2704 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
2705 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
2706 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
2707 			mac_addr[4] = (addr_high & 0xFF);
2708 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
2709 
2710 			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2711 					e1000_calc_rx_da_crc(mac_addr));
2712 		}
2713 
2714 		/* Write Rx addresses to the PHY */
2715 		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2716 
2717 		/* Enable jumbo frame workaround in the MAC */
2718 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2719 		mac_reg &= ~(1 << 14);
2720 		mac_reg |= (7 << 15);
2721 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2722 
2723 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2724 		mac_reg |= E1000_RCTL_SECRC;
2725 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2726 
2727 		ret_val = e1000_read_kmrn_reg_generic(hw,
2728 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2729 						&data);
2730 		if (ret_val)
2731 			return ret_val;
2732 		ret_val = e1000_write_kmrn_reg_generic(hw,
2733 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2734 						data | (1 << 0));
2735 		if (ret_val)
2736 			return ret_val;
2737 		ret_val = e1000_read_kmrn_reg_generic(hw,
2738 						E1000_KMRNCTRLSTA_HD_CTRL,
2739 						&data);
2740 		if (ret_val)
2741 			return ret_val;
2742 		data &= ~(0xF << 8);
2743 		data |= (0xB << 8);
2744 		ret_val = e1000_write_kmrn_reg_generic(hw,
2745 						E1000_KMRNCTRLSTA_HD_CTRL,
2746 						data);
2747 		if (ret_val)
2748 			return ret_val;
2749 
2750 		/* Enable jumbo frame workaround in the PHY */
2751 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2752 		data &= ~(0x7F << 5);
2753 		data |= (0x37 << 5);
2754 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2755 		if (ret_val)
2756 			return ret_val;
2757 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2758 		data &= ~(1 << 13);
2759 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2760 		if (ret_val)
2761 			return ret_val;
2762 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2763 		data &= ~(0x3FF << 2);
2764 		data |= (E1000_TX_PTR_GAP << 2);
2765 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2766 		if (ret_val)
2767 			return ret_val;
2768 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2769 		if (ret_val)
2770 			return ret_val;
2771 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2772 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2773 						(1 << 10));
2774 		if (ret_val)
2775 			return ret_val;
2776 	} else {
2777 		/* Write MAC register values back to h/w defaults */
2778 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2779 		mac_reg &= ~(0xF << 14);
2780 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2781 
2782 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2783 		mac_reg &= ~E1000_RCTL_SECRC;
2784 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2785 
2786 		ret_val = e1000_read_kmrn_reg_generic(hw,
2787 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2788 						&data);
2789 		if (ret_val)
2790 			return ret_val;
2791 		ret_val = e1000_write_kmrn_reg_generic(hw,
2792 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2793 						data & ~(1 << 0));
2794 		if (ret_val)
2795 			return ret_val;
2796 		ret_val = e1000_read_kmrn_reg_generic(hw,
2797 						E1000_KMRNCTRLSTA_HD_CTRL,
2798 						&data);
2799 		if (ret_val)
2800 			return ret_val;
2801 		data &= ~(0xF << 8);
2802 		data |= (0xB << 8);
2803 		ret_val = e1000_write_kmrn_reg_generic(hw,
2804 						E1000_KMRNCTRLSTA_HD_CTRL,
2805 						data);
2806 		if (ret_val)
2807 			return ret_val;
2808 
2809 		/* Write PHY register values back to h/w defaults */
2810 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2811 		data &= ~(0x7F << 5);
2812 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2813 		if (ret_val)
2814 			return ret_val;
2815 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2816 		data |= (1 << 13);
2817 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2818 		if (ret_val)
2819 			return ret_val;
2820 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2821 		data &= ~(0x3FF << 2);
2822 		data |= (0x8 << 2);
2823 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2824 		if (ret_val)
2825 			return ret_val;
2826 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2827 		if (ret_val)
2828 			return ret_val;
2829 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2830 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2831 						~(1 << 10));
2832 		if (ret_val)
2833 			return ret_val;
2834 	}
2835 
2836 	/* re-enable Rx path after enabling/disabling workaround */
2837 	return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2838 				     ~(1 << 14));
2839 }
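
/* Usage sketch (illustrative; the frame-size comparison is an assumed
 * example, not taken from this file):
 *
 *	if (hw->mac.type >= e1000_pch2lan)
 *		e1000_lv_jumbo_workaround_ich8lan(hw,
 *		    max_frame_size > ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN);
 */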
2840 
2841 /**
2842  *  e1000_lv_phy_workarounds_ich8lan - PHY workarounds after every PHY reset
2843  *  @hw: pointer to the HW structure
2844  **/
2845 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2846 {
2847 	s32 ret_val = E1000_SUCCESS;
2848 
2849 	DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2850 
2851 	if (hw->mac.type != e1000_pch2lan)
2852 		return E1000_SUCCESS;
2853 
2854 	/* Set MDIO slow mode before any other MDIO access */
2855 	ret_val = e1000_set_mdio_slow_mode_hv(hw);
2856 	if (ret_val)
2857 		return ret_val;
2858 
2859 	ret_val = hw->phy.ops.acquire(hw);
2860 	if (ret_val)
2861 		return ret_val;
2862 	/* set MSE higher to enable link to stay up when noise is high */
2863 	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2864 	if (ret_val)
2865 		goto release;
2866 	/* drop link after 5 times MSE threshold was reached */
2867 	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2868 release:
2869 	hw->phy.ops.release(hw);
2870 
2871 	return ret_val;
2872 }
2873 
2874 /**
2875  *  e1000_k1_workaround_lv - K1 Si workaround
2876  *  @hw:   pointer to the HW structure
2877  *
2878  *  Workaround to set the K1 beacon duration for 82579 parts at 10Mbps.
2879  *  Disables K1 for 1000 and 100 speeds.
2880  **/
2881 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2882 {
2883 	s32 ret_val = E1000_SUCCESS;
2884 	u16 status_reg = 0;
2885 
2886 	DEBUGFUNC("e1000_k1_workaround_lv");
2887 
2888 	if (hw->mac.type != e1000_pch2lan)
2889 		return E1000_SUCCESS;
2890 
2891 	/* Set K1 beacon duration based on 10Mbps speed */
2892 	ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2893 	if (ret_val)
2894 		return ret_val;
2895 
2896 	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2897 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2898 		if (status_reg &
2899 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2900 			u16 pm_phy_reg;
2901 
2902 			/* LV 1G/100 packet drop issue workaround */
2903 			ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2904 						       &pm_phy_reg);
2905 			if (ret_val)
2906 				return ret_val;
2907 			pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2908 			ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
2909 							pm_phy_reg);
2910 			if (ret_val)
2911 				return ret_val;
2912 		} else {
2913 			u32 mac_reg;
2914 			mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
2915 			mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2916 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2917 			E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2918 		}
2919 	}
2920 
2921 	return ret_val;
2922 }
2923 
2924 /**
2925  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2926  *  @hw:   pointer to the HW structure
2927  *  @gate: boolean set to TRUE to gate, FALSE to ungate
2928  *
2929  *  Gate/ungate the automatic PHY configuration via hardware; perform
2930  *  the configuration via software instead.
2931  **/
2932 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2933 {
2934 	u32 extcnf_ctrl;
2935 
2936 	DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
2937 
2938 	if (hw->mac.type < e1000_pch2lan)
2939 		return;
2940 
2941 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2942 
2943 	if (gate)
2944 		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2945 	else
2946 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2947 
2948 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
2949 }
2950 
2951 /**
2952  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
2953  *  @hw: pointer to the HW structure
2954  *
2955  *  Check the appropriate indication the MAC has finished configuring the
2956  *  PHY after a software reset.
2957  **/
2958 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2959 {
2960 	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2961 
2962 	DEBUGFUNC("e1000_lan_init_done_ich8lan");
2963 
2964 	/* Wait for basic configuration to complete before proceeding */
2965 	do {
2966 		data = E1000_READ_REG(hw, E1000_STATUS);
2967 		data &= E1000_STATUS_LAN_INIT_DONE;
2968 		usec_delay(100);
2969 	} while ((!data) && --loop);
2970 
2971 	/* If basic configuration is incomplete before the above loop
2972 	 * count reaches 0, loading the configuration from NVM will
2973 	 * leave the PHY in a bad state possibly resulting in no link.
2974 	 */
2975 	if (loop == 0)
2976 		DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
2977 
2978 	/* Clear the Init Done bit for the next init event */
2979 	data = E1000_READ_REG(hw, E1000_STATUS);
2980 	data &= ~E1000_STATUS_LAN_INIT_DONE;
2981 	E1000_WRITE_REG(hw, E1000_STATUS, data);
2982 }
2983 
2984 /**
2985  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2986  *  @hw: pointer to the HW structure
2987  **/
2988 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2989 {
2990 	s32 ret_val = E1000_SUCCESS;
2991 	u16 reg;
2992 
2993 	DEBUGFUNC("e1000_post_phy_reset_ich8lan");
2994 
2995 	if (hw->phy.ops.check_reset_block(hw))
2996 		return E1000_SUCCESS;
2997 
2998 	/* Allow time for h/w to get to quiescent state after reset */
2999 	msec_delay(10);
3000 
3001 	/* Perform any necessary post-reset workarounds */
3002 	switch (hw->mac.type) {
3003 	case e1000_pchlan:
3004 		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
3005 		if (ret_val)
3006 			return ret_val;
3007 		break;
3008 	case e1000_pch2lan:
3009 		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
3010 		if (ret_val)
3011 			return ret_val;
3012 		break;
3013 	default:
3014 		break;
3015 	}
3016 
3017 	/* Clear the host wakeup bit after lcd reset */
3018 	if (hw->mac.type >= e1000_pchlan) {
3019 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
3020 		reg &= ~BM_WUC_HOST_WU_BIT;
3021 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
3022 	}
3023 
3024 	/* Configure the LCD with the extended configuration region in NVM */
3025 	ret_val = e1000_sw_lcd_config_ich8lan(hw);
3026 	if (ret_val)
3027 		return ret_val;
3028 
3029 	/* Configure the LCD with the OEM bits in NVM */
3030 	ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
3031 
3032 	if (hw->mac.type == e1000_pch2lan) {
3033 		/* Ungate automatic PHY configuration on non-managed 82579 */
3034 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
3035 		    E1000_ICH_FWSM_FW_VALID)) {
3036 			msec_delay(10);
3037 			e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
3038 		}
3039 
3040 		/* Set EEE LPI Update Timer to 200usec */
3041 		ret_val = hw->phy.ops.acquire(hw);
3042 		if (ret_val)
3043 			return ret_val;
3044 		ret_val = e1000_write_emi_reg_locked(hw,
3045 						     I82579_LPI_UPDATE_TIMER,
3046 						     0x1387);
3047 		hw->phy.ops.release(hw);
3048 	}
3049 
3050 	return ret_val;
3051 }
3052 
3053 /**
3054  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
3055  *  @hw: pointer to the HW structure
3056  *
3057  *  Resets the PHY
3058  *  This is a function pointer entry point called by drivers
3059  *  or other shared routines.
3060  **/
3061 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
3062 {
3063 	s32 ret_val = E1000_SUCCESS;
3064 
3065 	DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
3066 
3067 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
3068 	if ((hw->mac.type == e1000_pch2lan) &&
3069 	    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3070 		e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3071 
3072 	ret_val = e1000_phy_hw_reset_generic(hw);
3073 	if (ret_val)
3074 		return ret_val;
3075 
3076 	return e1000_post_phy_reset_ich8lan(hw);
3077 }
3078 
3079 /**
3080  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
3081  *  @hw: pointer to the HW structure
3082  *  @active: TRUE to enable LPLU, FALSE to disable
3083  *
3084  *  Sets the LPLU state according to the active flag.  For PCH, if the OEM
3085  *  write bits are disabled in the NVM, writing the LPLU bits in the MAC will
3086  *  not set the PHY speed.  This function will manually set the LPLU bit and
3087  *  restart auto-neg as hw would do.  D3 and D0 LPLU will call the same function
3088  *  since it configures the same bit.
3089  **/
3090 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
3091 {
3092 	s32 ret_val;
3093 	u16 oem_reg;
3094 
3095 	DEBUGFUNC("e1000_set_lplu_state_pchlan");
3096 
3097 	ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
3098 	if (ret_val)
3099 		return ret_val;
3100 
3101 	if (active)
3102 		oem_reg |= HV_OEM_BITS_LPLU;
3103 	else
3104 		oem_reg &= ~HV_OEM_BITS_LPLU;
3105 
3106 	if (!hw->phy.ops.check_reset_block(hw))
3107 		oem_reg |= HV_OEM_BITS_RESTART_AN;
3108 
3109 	return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
3110 }
3111 
3112 /**
3113  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
3114  *  @hw: pointer to the HW structure
3115  *  @active: TRUE to enable LPLU, FALSE to disable
3116  *
3117  *  Sets the LPLU D0 state according to the active flag.  When
3118  *  activating LPLU this function also disables smart speed
3119  *  and vice versa.  LPLU will not be activated unless the
3120  *  device autonegotiation advertisement meets standards of
3121  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3122  *  This is a function pointer entry point only called by
3123  *  PHY setup routines.
3124  **/
3125 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3126 {
3127 	struct e1000_phy_info *phy = &hw->phy;
3128 	u32 phy_ctrl;
3129 	s32 ret_val = E1000_SUCCESS;
3130 	u16 data;
3131 
3132 	DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3133 
3134 	if (phy->type == e1000_phy_ife)
3135 		return E1000_SUCCESS;
3136 
3137 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3138 
3139 	if (active) {
3140 		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3141 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3142 
3143 		if (phy->type != e1000_phy_igp_3)
3144 			return E1000_SUCCESS;
3145 
3146 		/* Call gig speed drop workaround on LPLU before accessing
3147 		 * any PHY registers
3148 		 */
3149 		if (hw->mac.type == e1000_ich8lan)
3150 			e1000_gig_downshift_workaround_ich8lan(hw);
3151 
3152 		/* When LPLU is enabled, we should disable SmartSpeed */
3153 		ret_val = phy->ops.read_reg(hw,
3154 					    IGP01E1000_PHY_PORT_CONFIG,
3155 					    &data);
3156 		if (ret_val)
3157 			return ret_val;
3158 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3159 		ret_val = phy->ops.write_reg(hw,
3160 					     IGP01E1000_PHY_PORT_CONFIG,
3161 					     data);
3162 		if (ret_val)
3163 			return ret_val;
3164 	} else {
3165 		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3166 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3167 
3168 		if (phy->type != e1000_phy_igp_3)
3169 			return E1000_SUCCESS;
3170 
3171 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3172 		 * during Dx states where the power conservation is most
3173 		 * important.  During driver activity we should enable
3174 		 * SmartSpeed, so performance is maintained.
3175 		 */
3176 		if (phy->smart_speed == e1000_smart_speed_on) {
3177 			ret_val = phy->ops.read_reg(hw,
3178 						    IGP01E1000_PHY_PORT_CONFIG,
3179 						    &data);
3180 			if (ret_val)
3181 				return ret_val;
3182 
3183 			data |= IGP01E1000_PSCFR_SMART_SPEED;
3184 			ret_val = phy->ops.write_reg(hw,
3185 						     IGP01E1000_PHY_PORT_CONFIG,
3186 						     data);
3187 			if (ret_val)
3188 				return ret_val;
3189 		} else if (phy->smart_speed == e1000_smart_speed_off) {
3190 			ret_val = phy->ops.read_reg(hw,
3191 						    IGP01E1000_PHY_PORT_CONFIG,
3192 						    &data);
3193 			if (ret_val)
3194 				return ret_val;
3195 
3196 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3197 			ret_val = phy->ops.write_reg(hw,
3198 						     IGP01E1000_PHY_PORT_CONFIG,
3199 						     data);
3200 			if (ret_val)
3201 				return ret_val;
3202 		}
3203 	}
3204 
3205 	return E1000_SUCCESS;
3206 }
3207 
3208 /**
3209  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3210  *  @hw: pointer to the HW structure
3211  *  @active: TRUE to enable LPLU, FALSE to disable
3212  *
3213  *  Sets the LPLU D3 state according to the active flag.  When
3214  *  activating LPLU this function also disables smart speed
3215  *  and vice versa.  LPLU will not be activated unless the
3216  *  device autonegotiation advertisement meets standards of
3217  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3218  *  This is a function pointer entry point only called by
3219  *  PHY setup routines.
3220  **/
3221 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3222 {
3223 	struct e1000_phy_info *phy = &hw->phy;
3224 	u32 phy_ctrl;
3225 	s32 ret_val = E1000_SUCCESS;
3226 	u16 data;
3227 
3228 	DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3229 
3230 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3231 
3232 	if (!active) {
3233 		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3234 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3235 
3236 		if (phy->type != e1000_phy_igp_3)
3237 			return E1000_SUCCESS;
3238 
3239 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3240 		 * during Dx states where the power conservation is most
3241 		 * important.  During driver activity we should enable
3242 		 * SmartSpeed, so performance is maintained.
3243 		 */
3244 		if (phy->smart_speed == e1000_smart_speed_on) {
3245 			ret_val = phy->ops.read_reg(hw,
3246 						    IGP01E1000_PHY_PORT_CONFIG,
3247 						    &data);
3248 			if (ret_val)
3249 				return ret_val;
3250 
3251 			data |= IGP01E1000_PSCFR_SMART_SPEED;
3252 			ret_val = phy->ops.write_reg(hw,
3253 						     IGP01E1000_PHY_PORT_CONFIG,
3254 						     data);
3255 			if (ret_val)
3256 				return ret_val;
3257 		} else if (phy->smart_speed == e1000_smart_speed_off) {
3258 			ret_val = phy->ops.read_reg(hw,
3259 						    IGP01E1000_PHY_PORT_CONFIG,
3260 						    &data);
3261 			if (ret_val)
3262 				return ret_val;
3263 
3264 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3265 			ret_val = phy->ops.write_reg(hw,
3266 						     IGP01E1000_PHY_PORT_CONFIG,
3267 						     data);
3268 			if (ret_val)
3269 				return ret_val;
3270 		}
3271 	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3272 		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3273 		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3274 		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3275 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3276 
3277 		if (phy->type != e1000_phy_igp_3)
3278 			return E1000_SUCCESS;
3279 
3280 		/* Call gig speed drop workaround on LPLU before accessing
3281 		 * any PHY registers
3282 		 */
3283 		if (hw->mac.type == e1000_ich8lan)
3284 			e1000_gig_downshift_workaround_ich8lan(hw);
3285 
3286 		/* When LPLU is enabled, we should disable SmartSpeed */
3287 		ret_val = phy->ops.read_reg(hw,
3288 					    IGP01E1000_PHY_PORT_CONFIG,
3289 					    &data);
3290 		if (ret_val)
3291 			return ret_val;
3292 
3293 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3294 		ret_val = phy->ops.write_reg(hw,
3295 					     IGP01E1000_PHY_PORT_CONFIG,
3296 					     data);
3297 	}
3298 
3299 	return ret_val;
3300 }
3301 
3302 /**
3303  *  e1000_valid_nvm_bank_detect_ich8lan - Finds the valid NVM bank (0 or 1)
3304  *  @hw: pointer to the HW structure
3305  *  @bank:  pointer to the variable that returns the active bank
3306  *
3307  *  Reads signature byte from the NVM using the flash access registers.
3308  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3309  **/
3310 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3311 {
3312 	u32 eecd;
3313 	struct e1000_nvm_info *nvm = &hw->nvm;
3314 	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3315 	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3316 	u8 sig_byte = 0;
3317 	s32 ret_val;
3318 
3319 	DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3320 
3321 	switch (hw->mac.type) {
3322 	case e1000_pch_spt:
3323 		*bank = E1000_READ_REG(hw, E1000_CTRL_EXT) &
3324 			E1000_CTRL_EXT_NVMVS;
3325 		if (*bank == 0 || *bank == 1)
3326 			return -E1000_ERR_NVM;
3327 		*bank -= 2;
3328 		return E1000_SUCCESS;
3331 	case e1000_ich8lan:
3332 	case e1000_ich9lan:
3333 		eecd = E1000_READ_REG(hw, E1000_EECD);
3334 		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3335 		    E1000_EECD_SEC1VAL_VALID_MASK) {
3336 			if (eecd & E1000_EECD_SEC1VAL)
3337 				*bank = 1;
3338 			else
3339 				*bank = 0;
3340 
3341 			return E1000_SUCCESS;
3342 		}
3343 		DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3344 		/* fall-thru */
3345 	default:
3346 		/* set bank to 0 in case flash read fails */
3347 		*bank = 0;
3348 
3349 		/* Check bank 0 */
3350 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3351 							&sig_byte);
3352 		if (ret_val)
3353 			return ret_val;
3354 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3355 		    E1000_ICH_NVM_SIG_VALUE) {
3356 			*bank = 0;
3357 			return E1000_SUCCESS;
3358 		}
3359 
3360 		/* Check bank 1 */
3361 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3362 							bank1_offset,
3363 							&sig_byte);
3364 		if (ret_val)
3365 			return ret_val;
3366 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3367 		    E1000_ICH_NVM_SIG_VALUE) {
3368 			*bank = 1;
3369 			return E1000_SUCCESS;
3370 		}
3371 
3372 		DEBUGOUT("ERROR: No valid NVM bank present\n");
3373 		return -E1000_ERR_NVM;
3374 	}
3375 }
3376 
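/* Editorial sketch, not part of the driver: a minimal example of pairing
 * the bank detect helper above with the bank size to get the active
 * bank's base offset in words.  The helper name is hypothetical; the
 * fall-back to bank 0 mirrors what the NVM read paths below do.
 */
#if 0	/* illustration only */
static u32 e1000_active_bank_base_words(struct e1000_hw *hw)
{
	u32 bank = 0;

	if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank) != E1000_SUCCESS)
		bank = 0;	/* assume bank 0, as the NVM readers do */

	/* flash_bank_size is in 16-bit words; bank 1 starts one bank in */
	return bank ? hw->nvm.flash_bank_size : 0;
}
#endif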
3377 /**
3378  *  e1000_read_nvm_spt - Read word(s) from the NVM
3379  *  @hw: pointer to the HW structure
3380  *  @offset: The offset (in words) of the word(s) to read.
3381  *  @words: Size of data to read in words
3382  *  @data: Pointer to the buffer that receives the word(s) read at offset.
3383  *
3384  *  Reads one or more words from the NVM using the flash access registers.
3385  **/
3386 static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
3387 			      u16 *data)
3388 {
3389 	struct e1000_nvm_info *nvm = &hw->nvm;
3390 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3391 	u32 act_offset;
3392 	s32 ret_val = E1000_SUCCESS;
3393 	u32 bank = 0;
3394 	u32 dword;
3395 	u16 use_offset;
3396 	u16 i;
3397 
3398 	DEBUGFUNC("e1000_read_nvm_spt");
3399 
3400 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3401 	    (words == 0)) {
3402 		DEBUGOUT("nvm parameter(s) out of bounds\n");
3403 		ret_val = -E1000_ERR_NVM;
3404 		goto out;
3405 	}
3406 
3407 	nvm->ops.acquire(hw);
3408 
3409 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3410 	if (ret_val != E1000_SUCCESS) {
3411 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3412 		bank = 0;
3413 	}
3414 
3415 	act_offset = (bank) ? nvm->flash_bank_size : 0;
3416 	act_offset += offset;
3417 
3418 	ret_val = E1000_SUCCESS;
3419 	for (i = 0; i < words; i += 2) {
3420 		if (words - i == 1) {
3421 			if (dev_spec->shadow_ram[offset+i].modified) {
3422 				data[i] = dev_spec->shadow_ram[offset+i].value;
3423 			} else {
3424 				use_offset = act_offset + i -
3425 					     (act_offset + i) % 2;
3426 				ret_val = e1000_read_flash_dword_ich8lan(
3427 							hw,
3428 							use_offset,
3429 							&dword);
3430 				if (ret_val)
3431 					break;
3432 				if ((act_offset + i) % 2 == 0)
3433 					data[i] = (u16)(dword & 0xFFFF);
3434 				else
3435 					data[i] = (u16)((dword >> 16) & 0xFFFF);
3436 			}
3437 		} else {
3438 			use_offset = act_offset + i;
3439 			if (!(dev_spec->shadow_ram[offset + i].modified) ||
3440 			    !(dev_spec->shadow_ram[offset + i + 1].modified)) {
3441 				ret_val =
3442 				 e1000_read_flash_dword_ich8lan(hw,
3443 					use_offset, &dword);
3444 				if (ret_val)
3445 					break;
3446 			}
3447 			if (dev_spec->shadow_ram[offset + i].modified)
3448 				data[i] = dev_spec->shadow_ram[offset + i].value;
3449 			else
3450 				data[i] = (u16)(dword & 0xFFFF);
3451 			if (dev_spec->shadow_ram[offset + i + 1].modified)
3452 				data[i + 1] =
3453 				    dev_spec->shadow_ram[offset + i + 1].value;
3454 			else
3455 				data[i + 1] = (u16)(dword >> 16 & 0xFFFF);
3456 		}
3457 	}
3458 
3459 	nvm->ops.release(hw);
3460 
3461 out:
3462 	if (ret_val)
3463 		DEBUGOUT1("NVM read error: %d\n", ret_val);
3464 
3465 	return ret_val;
3466 }
3467 
3468 /**
3469  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3470  *  @hw: pointer to the HW structure
3471  *  @offset: The offset (in words) of the word(s) to read.
3472  *  @words: Size of data to read in words
3473  *  @data: Pointer to the buffer that receives the word(s) read at offset.
3474  *
3475  *  Reads one or more words from the NVM using the flash access registers.
3476  **/
3477 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3478 				  u16 *data)
3479 {
3480 	struct e1000_nvm_info *nvm = &hw->nvm;
3481 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3482 	u32 act_offset;
3483 	s32 ret_val = E1000_SUCCESS;
3484 	u32 bank = 0;
3485 	u16 i, word;
3486 
3487 	DEBUGFUNC("e1000_read_nvm_ich8lan");
3488 
3489 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3490 	    (words == 0)) {
3491 		DEBUGOUT("nvm parameter(s) out of bounds\n");
3492 		ret_val = -E1000_ERR_NVM;
3493 		goto out;
3494 	}
3495 
3496 	nvm->ops.acquire(hw);
3497 
3498 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3499 	if (ret_val != E1000_SUCCESS) {
3500 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3501 		bank = 0;
3502 	}
3503 
3504 	act_offset = (bank) ? nvm->flash_bank_size : 0;
3505 	act_offset += offset;
3506 
3507 	ret_val = E1000_SUCCESS;
3508 	for (i = 0; i < words; i++) {
3509 		if (dev_spec->shadow_ram[offset+i].modified) {
3510 			data[i] = dev_spec->shadow_ram[offset+i].value;
3511 		} else {
3512 			ret_val = e1000_read_flash_word_ich8lan(hw,
3513 								act_offset + i,
3514 								&word);
3515 			if (ret_val)
3516 				break;
3517 			data[i] = word;
3518 		}
3519 	}
3520 
3521 	nvm->ops.release(hw);
3522 
3523 out:
3524 	if (ret_val)
3525 		DEBUGOUT1("NVM read error: %d\n", ret_val);
3526 
3527 	return ret_val;
3528 }
3529 
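/* Editorial sketch, not part of the driver: reading a single NVM word
 * through the ops table, as other code in this file does (see the LED
 * default read further below).  ops.read dispatches to
 * e1000_read_nvm_spt or e1000_read_nvm_ich8lan depending on MAC type;
 * the word index used here is just an example.
 */
#if 0	/* illustration only */
static s32 e1000_read_one_nvm_word_example(struct e1000_hw *hw, u16 *val)
{
	return hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, val);
}
#endif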
3530 /**
3531  *  e1000_flash_cycle_init_ich8lan - Initialize flash
3532  *  @hw: pointer to the HW structure
3533  *
3534  *  This function does initial flash setup so that a new read/write/erase cycle
3535  *  can be started.
3536  **/
3537 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3538 {
3539 	union ich8_hws_flash_status hsfsts;
3540 	s32 ret_val = -E1000_ERR_NVM;
3541 
3542 	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3543 
3544 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3545 
3546 	/* Check if the flash descriptor is valid */
3547 	if (!hsfsts.hsf_status.fldesvalid) {
3548 		DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
3549 		return -E1000_ERR_NVM;
3550 	}
3551 
3552 	/* Clear FCERR and DAEL in hw status by writing 1 */
3553 	hsfsts.hsf_status.flcerr = 1;
3554 	hsfsts.hsf_status.dael = 1;
3555 	if (hw->mac.type == e1000_pch_spt)
3556 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
3557 	else
3558 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3559 
3560 	/* Before starting a new cycle we need a way to tell whether one
3561 	 * is already running: either the hardware provides a SPI
3562 	 * cycle-in-progress bit to check, or the FDONE bit reads 1 after
3563 	 * hardware reset, so it can serve as an indication of whether a
3564 	 * cycle is in progress or has completed.
3565 	 */
3567 
3568 	if (!hsfsts.hsf_status.flcinprog) {
3569 		/* There is no cycle running at present,
3570 		 * so we can start a cycle.
3571 		 * Begin by setting Flash Cycle Done.
3572 		 */
3573 		hsfsts.hsf_status.flcdone = 1;
3574 		if (hw->mac.type == e1000_pch_spt)
3575 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
3576 		else
3577 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3578 		ret_val = E1000_SUCCESS;
3579 	} else {
3580 		s32 i;
3581 
3582 		/* Otherwise poll for some time so the current
3583 		 * cycle has a chance to end before giving up.
3584 		 */
3585 		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3586 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3587 							      ICH_FLASH_HSFSTS);
3588 			if (!hsfsts.hsf_status.flcinprog) {
3589 				ret_val = E1000_SUCCESS;
3590 				break;
3591 			}
3592 			usec_delay(1);
3593 		}
3594 		if (ret_val == E1000_SUCCESS) {
3595 			/* The previous cycle completed before the timeout,
3596 			 * now set the Flash Cycle Done.
3597 			 */
3598 			hsfsts.hsf_status.flcdone = 1;
3599 			if (hw->mac.type == e1000_pch_spt)
3600 				E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3601 							hsfsts.regval & 0xFFFF);
3602 			else
3603 				E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3604 							hsfsts.regval);
3605 		} else {
3606 			DEBUGOUT("Flash controller busy, cannot get access\n");
3607 		}
3608 	}
3609 
3610 	return ret_val;
3611 }
3612 
3613 /**
3614  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3615  *  @hw: pointer to the HW structure
3616  *  @timeout: maximum time to wait for completion
3617  *
3618  *  This function starts a flash cycle and waits for its completion.
3619  **/
3620 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3621 {
3622 	union ich8_hws_flash_ctrl hsflctl;
3623 	union ich8_hws_flash_status hsfsts;
3624 	u32 i = 0;
3625 
3626 	DEBUGFUNC("e1000_flash_cycle_ich8lan");
3627 
3628 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
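	/* On SPT the 16-bit flash control word occupies the upper half of
	 * the 32-bit HSFSTS window (the register lives in LAN memory
	 * space, not flash), hence the shifted dword accesses below.
	 */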
3629 	if (hw->mac.type == e1000_pch_spt)
3630 		hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS) >> 16;
3631 	else
3632 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3633 	hsflctl.hsf_ctrl.flcgo = 1;
3634 
3635 	if (hw->mac.type == e1000_pch_spt)
3636 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, hsflctl.regval << 16);
3637 	else
3638 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3639 
3640 	/* wait until the FDONE bit is set to 1 */
3641 	do {
3642 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3643 		if (hsfsts.hsf_status.flcdone)
3644 			break;
3645 		usec_delay(1);
3646 	} while (i++ < timeout);
3647 
3648 	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3649 		return E1000_SUCCESS;
3650 
3651 	return -E1000_ERR_NVM;
3652 }
3653 
3654 /**
3655  *  e1000_read_flash_word_ich8lan - Read word from flash
3656  *  @hw: pointer to the HW structure
3657  *  @offset: offset to data location
3658  *  @data: pointer to the location for storing the data
3659  *
3660  *  Reads the flash word at offset into data.  Offset is converted
3661  *  to bytes before read.
3662  **/
3663 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3664 					 u16 *data)
3665 {
3666 	DEBUGFUNC("e1000_read_flash_word_ich8lan");
3667 
3668 	if (!data)
3669 		return -E1000_ERR_NVM;
3670 
3671 	/* Must convert offset into bytes. */
3672 	offset <<= 1;
3673 
3674 	return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3675 }
3676 
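/* Editorial note: a worked example of the word-to-byte conversion above.
 * The bank signature lives in NVM word 0x13 (E1000_ICH_NVM_SIG_WORD), so
 * its byte offset is 0x13 * 2 = 0x26 and the signature bits 15:14 sit in
 * the high byte at 0x27 -- exactly the E1000_ICH_NVM_SIG_WORD * 2 + 1
 * computed in e1000_valid_nvm_bank_detect_ich8lan() above.
 */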
3677 /**
3678  *  e1000_read_flash_dword_ich8lan - Read dword from flash
3679  *  @hw: pointer to the HW structure
3680  *  @offset: offset to data location
3681  *  @data: pointer to the location for storing the data
3682  *
3683  *  Reads the flash dword at offset into data.  Offset is converted
3684  *  to bytes before read.
3685  **/
3686 static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
3687 					 u32 *data)
3688 {
3689 	DEBUGFUNC("e1000_read_flash_dword_ich8lan");
3690 
3691 	if (!data)
3692 		return -E1000_ERR_NVM;
3693 
3694 	/* Must convert offset into bytes. */
3695 	offset <<= 1;
3696 
3697 	return e1000_read_flash_data32_ich8lan(hw, offset, data);
3698 }
3699 
3700 /**
3701  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3702  *  @hw: pointer to the HW structure
3703  *  @offset: The offset of the byte to read.
3704  *  @data: Pointer to a byte to store the value read.
3705  *
3706  *  Reads a single byte from the NVM using the flash access registers.
3707  **/
3708 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3709 					 u8 *data)
3710 {
3711 	s32 ret_val;
3712 	u16 word = 0;
3713 
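	/* The SPT flash interface only supports 32-bit register access
	 * (see the LAN memory space note in
	 * e1000_write_flash_data32_ich8lan), so byte-wide cycles are
	 * rejected here.
	 */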
3714 	if (hw->mac.type == e1000_pch_spt)
3715 		return -E1000_ERR_NVM;
3716 	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3717 
3718 	if (ret_val)
3719 		return ret_val;
3720 
3721 	*data = (u8)word;
3722 
3723 	return E1000_SUCCESS;
3724 }
3725 
3726 /**
3727  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3728  *  @hw: pointer to the HW structure
3729  *  @offset: The offset (in bytes) of the byte or word to read.
3730  *  @size: Size of data to read, 1=byte 2=word
3731  *  @data: Pointer to the word to store the value read.
3732  *
3733  *  Reads a byte or word from the NVM using the flash access registers.
3734  **/
3735 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3736 					 u8 size, u16 *data)
3737 {
3738 	union ich8_hws_flash_status hsfsts;
3739 	union ich8_hws_flash_ctrl hsflctl;
3740 	u32 flash_linear_addr;
3741 	u32 flash_data = 0;
3742 	s32 ret_val = -E1000_ERR_NVM;
3743 	u8 count = 0;
3744 
3745 	DEBUGFUNC("e1000_read_flash_data_ich8lan");
3746 
3747 	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3748 		return -E1000_ERR_NVM;
3749 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3750 			     hw->nvm.flash_base_addr);
3751 
3752 	do {
3753 		usec_delay(1);
3754 		/* Steps */
3755 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3756 		if (ret_val != E1000_SUCCESS)
3757 			break;
3758 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3759 
3760 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3761 		hsflctl.hsf_ctrl.fldbcount = size - 1;
3762 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3763 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3764 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3765 
3766 		ret_val = e1000_flash_cycle_ich8lan(hw,
3767 						ICH_FLASH_READ_COMMAND_TIMEOUT);
3768 
3769 		/* If FCERR is set, clear it and retry the whole
3770 		 * sequence a few more times; otherwise read the
3771 		 * result from Flash Data0, least significant byte
3772 		 * first.
3773 		 */
3774 		if (ret_val == E1000_SUCCESS) {
3775 			flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3776 			if (size == 1)
3777 				*data = (u8)(flash_data & 0x000000FF);
3778 			else if (size == 2)
3779 				*data = (u16)(flash_data & 0x0000FFFF);
3780 			break;
3781 		} else {
3782 			/* If we've gotten here, then things are probably
3783 			 * completely hosed, but if the error condition is
3784 			 * detected, it won't hurt to give it another try...
3785 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3786 			 */
3787 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3788 							      ICH_FLASH_HSFSTS);
3789 			if (hsfsts.hsf_status.flcerr) {
3790 				/* Repeat for some time before giving up. */
3791 				continue;
3792 			} else if (!hsfsts.hsf_status.flcdone) {
3793 				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3794 				break;
3795 			}
3796 		}
3797 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3798 
3799 	return ret_val;
3800 }
3801 
3802 /**
3803  *  e1000_read_flash_data32_ich8lan - Read dword from NVM
3804  *  @hw: pointer to the HW structure
3805  *  @offset: The offset (in bytes) of the dword to read.
3806  *  @data: Pointer to the dword to store the value read.
3807  *
3808  *  Reads a dword from the NVM using the flash access registers.
3809  *  Only supported on pch_spt, whose flash interface is 32-bit only.
3810  **/
3811 static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
3812 					 u32 *data)
3813 {
3814 	union ich8_hws_flash_status hsfsts;
3815 	union ich8_hws_flash_ctrl hsflctl;
3816 	u32 flash_linear_addr;
3817 	s32 ret_val = -E1000_ERR_NVM;
3818 	u8 count = 0;
3819 
3820 	DEBUGFUNC("e1000_read_flash_data32_ich8lan");
3821 
3822 	if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
3823 	    hw->mac.type != e1000_pch_spt)
3824 		return -E1000_ERR_NVM;
3825 
3826 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3827 			     hw->nvm.flash_base_addr);
3828 
3829 	do {
3830 		usec_delay(1);
3831 		/* Steps */
3832 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3833 		if (ret_val != E1000_SUCCESS)
3834 			break;
3835 		hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS) >> 16;
3836 
3837 		/* fldbcount is the byte count minus one; 3 selects a dword. */
3838 		hsflctl.hsf_ctrl.fldbcount = sizeof(int32_t) - 1;
3839 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3840 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, hsflctl.regval << 16);
3841 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3842 
3843 		ret_val = e1000_flash_cycle_ich8lan(hw,
3844 						ICH_FLASH_READ_COMMAND_TIMEOUT);
3845 
3846 		/* If FCERR is set, clear it and retry the whole
3847 		 * sequence a few more times; otherwise read the
3848 		 * result from Flash Data0, least significant byte
3849 		 * first.
3850 		 */
3851 		if (ret_val == E1000_SUCCESS) {
3852 			*data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3853 			break;
3854 		} else {
3855 			/* If we've gotten here, then things are probably
3856 			 * completely hosed, but if the error condition is
3857 			 * detected, it won't hurt to give it another try...
3858 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3859 			 */
3860 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3861 							      ICH_FLASH_HSFSTS);
3862 			if (hsfsts.hsf_status.flcerr) {
3863 				/* Repeat for some time before giving up. */
3864 				continue;
3865 			} else if (!hsfsts.hsf_status.flcdone) {
3866 				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3867 				break;
3868 			}
3869 		}
3870 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3871 
3872 	return ret_val;
3873 }
3874 
3875 
3876 /**
3877  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3878  *  @hw: pointer to the HW structure
3879  *  @offset: The offset (in words) of the word(s) to write.
3880  *  @words: Size of data to write in words
3881  *  @data: Pointer to the word(s) to write at offset.
3882  *
3883  *  Writes the word(s) to shadow RAM; the next checksum update commits them.
3884  **/
3885 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3886 				   u16 *data)
3887 {
3888 	struct e1000_nvm_info *nvm = &hw->nvm;
3889 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3890 	u16 i;
3891 
3892 	DEBUGFUNC("e1000_write_nvm_ich8lan");
3893 
3894 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3895 	    (words == 0)) {
3896 		DEBUGOUT("nvm parameter(s) out of bounds\n");
3897 		return -E1000_ERR_NVM;
3898 	}
3899 
3900 	nvm->ops.acquire(hw);
3901 
3902 	for (i = 0; i < words; i++) {
3903 		dev_spec->shadow_ram[offset+i].modified = TRUE;
3904 		dev_spec->shadow_ram[offset+i].value = data[i];
3905 	}
3906 
3907 	nvm->ops.release(hw);
3908 
3909 	return E1000_SUCCESS;
3910 }
3911 
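/* Editorial sketch, not part of the driver: the staged write/commit flow
 * described above.  A word written through ops.write only lands in the
 * shadow RAM; ops.update (one of the update_nvm_checksum routines below)
 * commits it to flash.  The function name, word index and value are
 * hypothetical.
 */
#if 0	/* illustration only */
static s32 e1000_write_and_commit_example(struct e1000_hw *hw)
{
	u16 val = 0x1234;	/* hypothetical value */
	s32 ret_val;

	ret_val = hw->nvm.ops.write(hw, NVM_ID_LED_SETTINGS, 1, &val);
	if (ret_val)
		return ret_val;

	/* Commit shadow RAM to flash and refresh the checksum. */
	return hw->nvm.ops.update(hw);
}
#endif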
3912 /**
3913  *  e1000_update_nvm_checksum_spt - Update the checksum for NVM
3914  *  @hw: pointer to the HW structure
3915  *
3916  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
3917  *  which writes the checksum to the shadow ram.  The changes in the shadow
3918  *  ram are then committed to the EEPROM by processing each bank at a time
3919  *  checking for the modified bit and writing only the pending changes.
3920  *  After a successful commit, the shadow ram is cleared and is ready for
3921  *  future writes.
3922  **/
3923 static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
3924 {
3925 	struct e1000_nvm_info *nvm = &hw->nvm;
3926 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3927 	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3928 	s32 ret_val;
3929 	u32 data32 = 0;
3930 
3931 	DEBUGFUNC("e1000_update_nvm_checksum_spt");
3932 
3933 	ret_val = e1000_update_nvm_checksum_generic(hw);
3934 	if (ret_val)
3935 		goto out;
3936 
3937 	if (nvm->type != e1000_nvm_flash_sw)
3938 		goto out;
3939 
3940 	nvm->ops.acquire(hw);
3941 
3942 	/* We're writing to the opposite bank so if we're on bank 1,
3943 	 * write to bank 0 etc.  We also need to erase the segment that
3944 	 * is going to be written
3945 	 */
3946 	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3947 	if (ret_val != E1000_SUCCESS) {
3948 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3949 		bank = 0;
3950 	}
3951 
3952 	if (bank == 0) {
3953 		new_bank_offset = nvm->flash_bank_size;
3954 		old_bank_offset = 0;
3955 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3956 		if (ret_val)
3957 			goto release;
3958 	} else {
3959 		old_bank_offset = nvm->flash_bank_size;
3960 		new_bank_offset = 0;
3961 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3962 		if (ret_val)
3963 			goto release;
3964 	}
3965 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i += 2) {
3966 		/* Determine whether to write the value stored
3967 		 * in the other NVM bank or a modified value stored
3968 		 * in the shadow RAM
3969 		 */
3970 		ret_val = e1000_read_flash_dword_ich8lan(hw,
3971 							 i + old_bank_offset,
3972 							 &data32);
3973 		if (dev_spec->shadow_ram[i].modified) {
3974 			data32 &= 0xFFFF0000;
3975 			data32 |= dev_spec->shadow_ram[i].value & 0xffff;
3976 		}
3977 		if (dev_spec->shadow_ram[i + 1].modified) {
3978 			data32 &= 0x0000FFFF;
3979 			data32 |= (dev_spec->shadow_ram[i + 1].value & 0xffff)
3980 				  << 16;
3981 		}
3982 		if (ret_val)
3983 			break;
3984 
3985 		/* If the word is 0x13, then make sure the signature bits
3986 		 * (15:14) are 11b until the commit has completed.
3987 		 * This will allow us to write 10b which indicates the
3988 		 * signature is valid.  We want to do this after the write
3989 		 * has completed so that we don't mark the segment valid
3990 		 * while the write is still in progress
3991 		 */
3992 		if (i == E1000_ICH_NVM_SIG_WORD - 1)
3993 			data32 |= E1000_ICH_NVM_SIG_MASK << 16;
3994 
3995 		/* The offset stays in words here; the dword write
3996 		 * helper below takes it without byte conversion. */
3997 
3998 		usec_delay(100);
3999 
4000 		/* Write the bytes to the new bank. */
4001 		act_offset = i + new_bank_offset;
4002 		ret_val = e1000_retry_write_flash_dword_ich8lan(hw,
4003 							       act_offset,
4004 							       data32);
4005 		if (ret_val)
4006 			break;
4007 	}
4008 
4009 	/* Don't bother writing the segment valid bits if sector
4010 	 * programming failed.
4011 	 */
4012 	if (ret_val) {
4013 		DEBUGOUT("Flash commit failed.\n");
4014 		goto release;
4015 	}
4016 
4017 	/* Finally, validate the new segment by setting bits 15:14
4018 	 * to 10b in word 0x13.  This can be done without an erase
4019 	 * because those bits start out as 11b and only bit 14 has
4020 	 * to change to 0b.
4021 	 */
4022 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4023 
4024 	/* offset is in words but we read a dword */
4025 	--act_offset;
4026 
4027 	ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &data32);
4028 	if (ret_val)
4029 		goto release;
4030 
4031 	data32 &= 0xBFFFFFFF;
4032 	ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset * 2 + 1,
4033 							data32);
4034 	if (ret_val)
4035 		goto release;
4036 
4037 	/* And invalidate the previously valid segment by setting
4038 	 * its signature word (0x13) high_byte to 0b. This can be
4039 	 * done without an erase because flash erase sets all bits
4040 	 * to 1's. We can write 1's to 0's without an erase
4041 	 */
4042 	/* offset is in words but we read dwords */
4043 	act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
4046 	ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &data32);
4047 	if (ret_val)
4048 		goto release;

	/* Clear the old signature's high byte (bits 31:24 of the dword)
	 * so the segment no longer reads back as valid; the write offset
	 * convention matches the validate step above.
	 */
	data32 &= 0x00FFFFFF;
	ret_val = e1000_retry_write_flash_dword_ich8lan(hw,
							act_offset * 2 + 1,
							data32);
	if (ret_val)
		goto release;
4050 
4051 	/* Great!  Everything worked, we can now clear the cached entries. */
4052 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4053 		dev_spec->shadow_ram[i].modified = FALSE;
4054 		dev_spec->shadow_ram[i].value = 0xFFFF;
4055 	}
4056 
4057 release:
4058 	nvm->ops.release(hw);
4059 
4060 	/* Reload the EEPROM, or else modifications will not appear
4061 	 * until after the next adapter reset.
4062 	 */
4063 	if (!ret_val) {
4064 		nvm->ops.reload(hw);
4065 		msec_delay(10);
4066 	}
4067 
4068 out:
4069 	if (ret_val)
4070 		DEBUGOUT1("NVM update error: %d\n", ret_val);
4071 
4072 	return ret_val;
4073 }
4074 
4075 /**
4076  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
4077  *  @hw: pointer to the HW structure
4078  *
4079  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
4080  *  which writes the checksum to the shadow ram.  The changes in the shadow
4081  *  ram are then committed to the EEPROM by processing each bank at a time
4082  *  checking for the modified bit and writing only the pending changes.
4083  *  After a successful commit, the shadow ram is cleared and is ready for
4084  *  future writes.
4085  **/
4086 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
4087 {
4088 	struct e1000_nvm_info *nvm = &hw->nvm;
4089 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4090 	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4091 	s32 ret_val;
4092 	u16 data = 0;
4093 
4094 	DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
4095 
4096 	ret_val = e1000_update_nvm_checksum_generic(hw);
4097 	if (ret_val)
4098 		goto out;
4099 
4100 	if (nvm->type != e1000_nvm_flash_sw)
4101 		goto out;
4102 
4103 	nvm->ops.acquire(hw);
4104 
4105 	/* We're writing to the opposite bank so if we're on bank 1,
4106 	 * write to bank 0 etc.  We also need to erase the segment that
4107 	 * is going to be written
4108 	 */
4109 	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4110 	if (ret_val != E1000_SUCCESS) {
4111 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4112 		bank = 0;
4113 	}
4114 
4115 	if (bank == 0) {
4116 		new_bank_offset = nvm->flash_bank_size;
4117 		old_bank_offset = 0;
4118 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4119 		if (ret_val)
4120 			goto release;
4121 	} else {
4122 		old_bank_offset = nvm->flash_bank_size;
4123 		new_bank_offset = 0;
4124 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4125 		if (ret_val)
4126 			goto release;
4127 	}
4128 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4129 		if (dev_spec->shadow_ram[i].modified) {
4130 			data = dev_spec->shadow_ram[i].value;
4131 		} else {
4132 			ret_val = e1000_read_flash_word_ich8lan(hw, i +
4133 								old_bank_offset,
4134 								&data);
4135 			if (ret_val)
4136 				break;
4137 		}
4138 		/* If the word is 0x13, then make sure the signature bits
4139 		 * (15:14) are 11b until the commit has completed.
4140 		 * This will allow us to write 10b which indicates the
4141 		 * signature is valid.  We want to do this after the write
4142 		 * has completed so that we don't mark the segment valid
4143 		 * while the write is still in progress
4144 		 */
4145 		if (i == E1000_ICH_NVM_SIG_WORD)
4146 			data |= E1000_ICH_NVM_SIG_MASK;
4147 
4148 		/* Convert offset to bytes. */
4149 		act_offset = (i + new_bank_offset) << 1;
4150 
4151 		usec_delay(100);
4152 
4153 		/* Write the bytes to the new bank. */
4154 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4155 							       act_offset,
4156 							       (u8)data);
4157 		if (ret_val)
4158 			break;
4159 
4160 		usec_delay(100);
4161 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4162 							  act_offset + 1,
4163 							  (u8)(data >> 8));
4164 		if (ret_val)
4165 			break;
4166 	}
4167 
4168 	/* Don't bother writing the segment valid bits if sector
4169 	 * programming failed.
4170 	 */
4171 	if (ret_val) {
4172 		DEBUGOUT("Flash commit failed.\n");
4173 		goto release;
4174 	}
4175 
4176 	/* Finally, validate the new segment by setting bits 15:14
4177 	 * to 10b in word 0x13.  This can be done without an erase
4178 	 * because those bits start out as 11b and only bit 14 has
4179 	 * to change to 0b.
4180 	 */
4181 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4182 	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
4183 	if (ret_val)
4184 		goto release;
4185 
4186 	data &= 0xBFFF;
4187 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1,
4188 						       (u8)(data >> 8));
4189 	if (ret_val)
4190 		goto release;
4191 
4192 	/* And invalidate the previously valid segment by setting
4193 	 * its signature word (0x13) high_byte to 0b. This can be
4194 	 * done without an erase because flash erase sets all bits
4195 	 * to 1's. We can write 1's to 0's without an erase
4196 	 */
4197 	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4198 
4199 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
4200 
4201 	if (ret_val)
4202 		goto release;
4203 
4204 	/* Great!  Everything worked, we can now clear the cached entries. */
4205 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4206 		dev_spec->shadow_ram[i].modified = FALSE;
4207 		dev_spec->shadow_ram[i].value = 0xFFFF;
4208 	}
4209 
4210 release:
4211 	nvm->ops.release(hw);
4212 
4213 	/* Reload the EEPROM, or else modifications will not appear
4214 	 * until after the next adapter reset.
4215 	 */
4216 	if (!ret_val) {
4217 		nvm->ops.reload(hw);
4218 		msec_delay(10);
4219 	}
4220 
4221 out:
4222 	if (ret_val)
4223 		DEBUGOUT1("NVM update error: %d\n", ret_val);
4224 
4225 	return ret_val;
4226 }
4227 
4228 /**
4229  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
4230  *  @hw: pointer to the HW structure
4231  *
4232  *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
4233  *  If the bit is 0, the EEPROM has been modified but the checksum was not
4234  *  recalculated, in which case we need to calculate the checksum and set bit 6.
4235  **/
4236 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4237 {
4238 	s32 ret_val;
4239 	u16 data;
4240 	u16 word;
4241 	u16 valid_csum_mask;
4242 
4243 	DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
4244 
4245 	/* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
4246 	 * the checksum needs to be fixed.  This bit is an indication that
4247 	 * the NVM was prepared by OEM software and did not calculate
4248 	 * the checksum...a likely scenario.
4249 	 */
4250 	switch (hw->mac.type) {
4251 	case e1000_pch_lpt:
4252 	case e1000_pch_spt:
4253 		word = NVM_COMPAT;
4254 		valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4255 		break;
4256 	default:
4257 		word = NVM_FUTURE_INIT_WORD1;
4258 		valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4259 		break;
4260 	}
4261 
4262 	ret_val = hw->nvm.ops.read(hw, word, 1, &data);
4263 	if (ret_val)
4264 		return ret_val;
4265 
4266 	if (!(data & valid_csum_mask)) {
4267 		data |= valid_csum_mask;
4268 		ret_val = hw->nvm.ops.write(hw, word, 1, &data);
4269 		if (ret_val)
4270 			return ret_val;
4271 		ret_val = hw->nvm.ops.update(hw);
4272 		if (ret_val)
4273 			return ret_val;
4274 	}
4275 
4276 	return e1000_validate_nvm_checksum_generic(hw);
4277 }
4278 
4279 /**
4280  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4281  *  @hw: pointer to the HW structure
4282  *  @offset: The offset (in bytes) of the byte/word to write.
4283  *  @size: Size of data to write, 1=byte 2=word
4284  *  @data: The byte(s) to write to the NVM.
4285  *
4286  *  Writes one/two bytes to the NVM using the flash access registers.
4287  **/
4288 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4289 					  u8 size, u16 data)
4290 {
4291 	union ich8_hws_flash_status hsfsts;
4292 	union ich8_hws_flash_ctrl hsflctl;
4293 	u32 flash_linear_addr;
4294 	u32 flash_data = 0;
4295 	s32 ret_val;
4296 	u8 count = 0;
4297 
4298 	DEBUGFUNC("e1000_write_flash_data_ich8lan");
4299 
4300 	if (hw->mac.type == e1000_pch_spt) {
4301 		if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4302 			return -E1000_ERR_NVM;
4303 	} else {
4304 		if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4305 			return -E1000_ERR_NVM;
4306 	}
4307 
4308 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4309 			     hw->nvm.flash_base_addr);
4310 
4311 	do {
4312 		usec_delay(1);
4313 		/* Steps */
4314 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
4315 		if (ret_val != E1000_SUCCESS)
4316 			break;
4317 		if (hw->mac.type == e1000_pch_spt)
4318 			hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS) >> 16;
4319 		else
4320 			hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4321 
4322 		/* fldbcount is the byte count minus one: 0/1 for a
4323 		 * byte/word, 3 for the dword case on SPT.
4324 		 */
4323 		hsflctl.hsf_ctrl.fldbcount = size - 1;
4324 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4325 		if (hw->mac.type == e1000_pch_spt)
4326 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, hsflctl.regval << 16);
4327 		else
4328 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
4329 
4330 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4331 
4332 		if (size == 1)
4333 			flash_data = (u32)data & 0x00FF;
4334 		else
4335 			flash_data = (u32)data;
4336 
4337 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
4338 
4339 		/* If FCERR is set, clear it and retry the whole
4340 		 * sequence a few more times; otherwise we are done.
4341 		 */
4342 		ret_val =
4343 		    e1000_flash_cycle_ich8lan(hw,
4344 					      ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4345 		if (ret_val == E1000_SUCCESS)
4346 			break;
4347 
4348 		/* If we're here, then things are most likely
4349 		 * completely hosed, but if the error condition
4350 		 * is detected, it won't hurt to give it another
4351 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4352 		 */
4353 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4354 		if (hsfsts.hsf_status.flcerr)
4355 			/* Repeat for some time before giving up. */
4356 			continue;
4357 		if (!hsfsts.hsf_status.flcdone) {
4358 			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4359 			break;
4360 		}
4361 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4362 
4363 	return ret_val;
4364 }
4365 
4366 /**
4367  *  e1000_write_flash_data32_ich8lan - Writes a dword to the NVM
4368  *  @hw: pointer to the HW structure
4369  *  @offset: The offset (in bytes) of the dword to write.
4370  *  @data: The dword to write to the NVM.
4371  *
4372  *  Writes a dword to the NVM using the flash access registers.
4373  **/
4374 static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4375 					    u32 data)
4376 {
4377 	union ich8_hws_flash_status hsfsts;
4378 	union ich8_hws_flash_ctrl hsflctl;
4379 	u32 flash_linear_addr;
4380 	s32 ret_val;
4381 	u8 count = 0;
4382 
4383 	DEBUGFUNC("e1000_write_flash_data32_ich8lan");
4384 
4385 	if (hw->mac.type == e1000_pch_spt) {
4386 		if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
4387 			return -E1000_ERR_NVM;
4388 	}
4389 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4390 			     hw->nvm.flash_base_addr);
4391 
4392 	do {
4393 		usec_delay(1);
4394 		/* Steps */
4395 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
4396 		if (ret_val != E1000_SUCCESS)
4397 			break;
4398 		if (hw->mac.type == e1000_pch_spt) {
4399 			hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS) >> 16;
4400 		} else {
4401 			hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4402 		}
4403 
4404 		/* fldbcount is the byte count minus one; 3 selects a dword. */
4405 		hsflctl.hsf_ctrl.fldbcount = sizeof(int32_t) - 1;
4406 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4407 
4408 		/* In SPT, this register is in LAN memory space, not
4409 		 * flash, so only 32-bit access is supported.
4410 		 */
4412 		if (hw->mac.type == e1000_pch_spt) {
4413 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, hsflctl.regval << 16);
4414 		} else {
4415 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
4416 		}
4417 
4418 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4419 
4420 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);
4421 
4422 		/* If FCERR is set, clear it and retry the whole
4423 		 * sequence a few more times; otherwise we are done.
4424 		 */
4425 		ret_val =
4426 		    e1000_flash_cycle_ich8lan(hw,
4427 					      ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4428 		if (ret_val == E1000_SUCCESS)
4429 			break;
4430 
4431 		/* If we're here, then things are most likely
4432 		 * completely hosed, but if the error condition
4433 		 * is detected, it won't hurt to give it another
4434 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4435 		 */
4436 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4437 		if (hsfsts.hsf_status.flcerr)
4438 			/* Repeat for some time before giving up. */
4439 			continue;
4440 		if (!hsfsts.hsf_status.flcdone) {
4441 			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4442 			break;
4443 		}
4444 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4445 
4446 	return ret_val;
4447 }
4448 
4449 
4450 /**
4451  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
4452  *  @hw: pointer to the HW structure
4453  *  @offset: The index of the byte to write.
4454  *  @data: The byte to write to the NVM.
4455  *
4456  *  Writes a single byte to the NVM using the flash access registers.
4457  **/
4458 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
4459 					  u8 data)
4460 {
4461 	u16 word = (u16)data;
4462 
4463 	DEBUGFUNC("e1000_write_flash_byte_ich8lan");
4464 
4465 	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4466 }
4467 
4468 /**
4469  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
4470  *  @hw: pointer to the HW structure
4471  *  @offset: The offset of the byte to write.
4472  *  @byte: The byte to write to the NVM.
4473  *
4474  *  Writes a single byte to the NVM using the flash access registers.
4475  *  Goes through a retry algorithm before giving up.
4476  **/
4477 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
4478 						u32 offset, u8 byte)
4479 {
4480 	s32 ret_val;
4481 	u16 program_retries;
4482 
4483 	DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
4484 
4485 	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4486 	if (!ret_val)
4487 		return ret_val;
4488 
4489 	for (program_retries = 0; program_retries < 100; program_retries++) {
4490 		DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
4491 		usec_delay(100);
4492 		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4493 		if (ret_val == E1000_SUCCESS)
4494 			break;
4495 	}
4496 	if (program_retries == 100)
4497 		return -E1000_ERR_NVM;
4498 
4499 	return E1000_SUCCESS;
4500 }
4501 
4502 /**
4503  *  e1000_retry_write_flash_dword_ich8lan - Writes a 32-bit word to NVM
4504  *  @hw: pointer to the HW structure
4505  *  @offset: The offset of the dword to write.
4506  *  @dword: The dword to write to the NVM.
4507  *
4508  *  Writes a single 32-bit word to the NVM using the flash access registers.
4509  *  Goes through a retry algorithm before giving up.
4510  **/
4511 static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
4512 						u32 offset, u32 dword)
4513 {
4514 	s32 ret_val;
4515 	u16 program_retries;
4516 
4517 	DEBUGFUNC("e1000_retry_write_flash_dword_ich8lan");
4518 
4519 	ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4520 	if (!ret_val)
4521 		return ret_val;
4522 
4523 	for (program_retries = 0; program_retries < 100; program_retries++) {
4524 		DEBUGOUT2("Retrying DWord %08X at offset %u\n", dword, offset);
4525 		usec_delay(100);
4526 		ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4527 		if (ret_val == E1000_SUCCESS)
4528 			break;
4529 	}
4530 	if (program_retries == 100)
4531 		return -E1000_ERR_NVM;
4532 
4533 	return E1000_SUCCESS;
4534 }
4535 
4536 /**
4537  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
4538  *  @hw: pointer to the HW structure
4539  *  @bank: 0 for first bank, 1 for second bank, etc.
4540  *
4541  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
4542  *  Bank N starts at 4096 * N + flash_reg_addr.
4543  **/
4544 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
4545 {
4546 	struct e1000_nvm_info *nvm = &hw->nvm;
4547 	union ich8_hws_flash_status hsfsts;
4548 	union ich8_hws_flash_ctrl hsflctl;
4549 	u32 flash_linear_addr;
4550 	/* bank size is in 16bit words - adjust to bytes */
4551 	u32 flash_bank_size = nvm->flash_bank_size * 2;
4552 	s32 ret_val;
4553 	s32 count = 0;
4554 	s32 j, iteration, sector_size;
4555 
4556 	DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
4557 
4558 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4559 
4560 	/* Determine HW Sector size: Read BERASE bits of hw flash status
4561 	 * register
4562 	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
4563 	 *     consecutive sectors.  The start index for the nth Hw sector
4564 	 *     can be calculated as = bank * 4096 + n * 256
4565 	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
4566 	 *     The start index for the nth Hw sector can be calculated
4567 	 *     as = bank * 4096
4568 	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
4569 	 *     (ich9 only, otherwise error condition)
4570 	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
4571 	 */
4572 	switch (hsfsts.hsf_status.berasesz) {
4573 	case 0:
4574 		/* Hw sector size 256 */
4575 		sector_size = ICH_FLASH_SEG_SIZE_256;
4576 		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4577 		break;
4578 	case 1:
4579 		sector_size = ICH_FLASH_SEG_SIZE_4K;
4580 		iteration = 1;
4581 		break;
4582 	case 2:
4583 		sector_size = ICH_FLASH_SEG_SIZE_8K;
4584 		iteration = 1;
4585 		break;
4586 	case 3:
4587 		sector_size = ICH_FLASH_SEG_SIZE_64K;
4588 		iteration = 1;
4589 		break;
4590 	default:
4591 		return -E1000_ERR_NVM;
4592 	}
4593 
4594 	/* Start with the base address, then add the sector offset. */
4595 	flash_linear_addr = hw->nvm.flash_base_addr;
4596 	flash_linear_addr += (bank) ? flash_bank_size : 0;
4597 
4598 	for (j = 0; j < iteration; j++) {
4599 		do {
4600 			u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4601 
4602 			/* Steps */
4603 			ret_val = e1000_flash_cycle_init_ich8lan(hw);
4604 			if (ret_val)
4605 				return ret_val;
4606 
4607 			/* Write a value 11 (block Erase) in Flash
4608 			 * Cycle field in hw flash control
4609 			 */
4610 			if (hw->mac.type == e1000_pch_spt)
4611 				hsflctl.regval =
4612 				    E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS) >> 16;
4613 			else
4614 				hsflctl.regval =
4615 				    E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4616 
4617 			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4618 			if (hw->mac.type == e1000_pch_spt)
4619 				E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4620 							hsflctl.regval << 16);
4621 			else
4622 				E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4623 							hsflctl.regval);
4624 
4625 			/* Write the last 24 bits of an index within the
4626 			 * block into Flash Linear address field in Flash
4627 			 * Address.
4628 			 */
4629 			flash_linear_addr += (j * sector_size);
4630 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
4631 					      flash_linear_addr);
4632 
4633 			ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4634 			if (ret_val == E1000_SUCCESS)
4635 				break;
4636 
4637 			/* If FCERR is set, clear it and retry the
4638 			 * whole sequence a few more times; otherwise
4639 			 * we are done.
4640 			 */
4641 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4642 						      ICH_FLASH_HSFSTS);
4643 			if (hsfsts.hsf_status.flcerr)
4644 				/* repeat for some time before giving up */
4645 				continue;
4646 			else if (!hsfsts.hsf_status.flcdone)
4647 				return ret_val;
4648 		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4649 	}
4650 
4651 	return E1000_SUCCESS;
4652 }
4653 
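/* Editorial note: a worked example of the erase geometry above, assuming
 * a (hypothetical) 4 KB bank, i.e. flash_bank_size of 2048 words or 4096
 * bytes.  If BERASE reports 256-byte sectors (00b) the loop runs
 * 4096 / 256 = 16 erase cycles per bank; for 4 KB sectors (01b) a single
 * cycle erases the whole bank.
 */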
4654 /**
4655  *  e1000_valid_led_default_ich8lan - Set the default LED settings
4656  *  @hw: pointer to the HW structure
4657  *  @data: Pointer to the LED settings
4658  *
4659  *  Reads the LED default settings from the NVM to data.  If the NVM LED
4660  *  settings is all 0's or F's, set the LED default to a valid LED default
4661  *  setting.
4662  **/
4663 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4664 {
4665 	s32 ret_val;
4666 
4667 	DEBUGFUNC("e1000_valid_led_default_ich8lan");
4668 
4669 	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4670 	if (ret_val) {
4671 		DEBUGOUT("NVM Read Error\n");
4672 		return ret_val;
4673 	}
4674 
4675 	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4676 		*data = ID_LED_DEFAULT_ICH8LAN;
4677 
4678 	return E1000_SUCCESS;
4679 }
4680 
4681 /**
4682  *  e1000_id_led_init_pchlan - store LED configurations
4683  *  @hw: pointer to the HW structure
4684  *
4685  *  PCH does not control LEDs via the LEDCTL register, rather it uses
4686  *  the PHY LED configuration register.
4687  *
4688  *  PCH also does not have an "always on" or "always off" mode which
4689  *  complicates the ID feature.  Instead of using the "on" mode to indicate
4690  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4691  *  use "link_up" mode.  The LEDs will still ID on request if there is no
4692  *  link based on logic in e1000_led_[on|off]_pchlan().
4693  **/
4694 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4695 {
4696 	struct e1000_mac_info *mac = &hw->mac;
4697 	s32 ret_val;
4698 	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4699 	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4700 	u16 data, i, temp, shift;
4701 
4702 	DEBUGFUNC("e1000_id_led_init_pchlan");
4703 
4704 	/* Get default ID LED modes */
4705 	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4706 	if (ret_val)
4707 		return ret_val;
4708 
4709 	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4710 	mac->ledctl_mode1 = mac->ledctl_default;
4711 	mac->ledctl_mode2 = mac->ledctl_default;
4712 
4713 	for (i = 0; i < 4; i++) {
4714 		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4715 		shift = (i * 5);
4716 		switch (temp) {
4717 		case ID_LED_ON1_DEF2:
4718 		case ID_LED_ON1_ON2:
4719 		case ID_LED_ON1_OFF2:
4720 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4721 			mac->ledctl_mode1 |= (ledctl_on << shift);
4722 			break;
4723 		case ID_LED_OFF1_DEF2:
4724 		case ID_LED_OFF1_ON2:
4725 		case ID_LED_OFF1_OFF2:
4726 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4727 			mac->ledctl_mode1 |= (ledctl_off << shift);
4728 			break;
4729 		default:
4730 			/* Do nothing */
4731 			break;
4732 		}
4733 		switch (temp) {
4734 		case ID_LED_DEF1_ON2:
4735 		case ID_LED_ON1_ON2:
4736 		case ID_LED_OFF1_ON2:
4737 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4738 			mac->ledctl_mode2 |= (ledctl_on << shift);
4739 			break;
4740 		case ID_LED_DEF1_OFF2:
4741 		case ID_LED_ON1_OFF2:
4742 		case ID_LED_OFF1_OFF2:
4743 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4744 			mac->ledctl_mode2 |= (ledctl_off << shift);
4745 			break;
4746 		default:
4747 			/* Do nothing */
4748 			break;
4749 		}
4750 	}
4751 
4752 	return E1000_SUCCESS;
4753 }
4754 
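/* Editorial note: a worked example of the decode loop above.  Each of
 * the four LEDs gets a 4-bit mode nibble in the NVM word, selected by
 * data >> (i << 2), and each LED's field in the PHY LED configuration
 * register is shifted by i * 5 (suggesting 5-bit per-LED fields).  So
 * for LED 2 (i == 2) the nibble comes from data bits 11:8 and the mode
 * is written at the register bits starting at bit 10.
 */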
4755 /**
4756  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4757  *  @hw: pointer to the HW structure
4758  *
4759  *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
4760  *  register, so the bus width is hardcoded.
4761  **/
4762 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4763 {
4764 	struct e1000_bus_info *bus = &hw->bus;
4765 	s32 ret_val;
4766 
4767 	DEBUGFUNC("e1000_get_bus_info_ich8lan");
4768 
4769 	ret_val = e1000_get_bus_info_pcie_generic(hw);
4770 
4771 	/* ICH devices are "PCI Express"-ish.  They have
4772 	 * a configuration space, but do not contain
4773 	 * PCI Express Capability registers, so bus width
4774 	 * must be hardcoded.
4775 	 */
4776 	if (bus->width == e1000_bus_width_unknown)
4777 		bus->width = e1000_bus_width_pcie_x1;
4778 
4779 	return ret_val;
4780 }
4781 
4782 /**
4783  *  e1000_reset_hw_ich8lan - Reset the hardware
4784  *  @hw: pointer to the HW structure
4785  *
4786  *  Does a full reset of the hardware which includes a reset of the PHY and
4787  *  MAC.
4788  **/
4789 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4790 {
4791 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4792 	u16 kum_cfg;
4793 	u32 ctrl, reg;
4794 	s32 ret_val;
4795 
4796 	DEBUGFUNC("e1000_reset_hw_ich8lan");
4797 
4798 	/* Prevent the PCI-E bus from sticking if there is no TLP connection
4799 	 * on the last TLP read/write transaction when MAC is reset.
4800 	 */
4801 	ret_val = e1000_disable_pcie_master_generic(hw);
4802 	if (ret_val)
4803 		DEBUGOUT("PCI-E Master disable polling has failed.\n");
4804 
4805 	DEBUGOUT("Masking off all interrupts\n");
4806 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4807 
4808 	/* Disable the Transmit and Receive units.  Then delay to allow
4809 	 * any pending transactions to complete before we hit the MAC
4810 	 * with the global reset.
4811 	 */
4812 	E1000_WRITE_REG(hw, E1000_RCTL, 0);
4813 	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
4814 	E1000_WRITE_FLUSH(hw);
4815 
4816 	msec_delay(10);
4817 
4818 	/* Workaround for ICH8 bit corruption issue in FIFO memory */
4819 	if (hw->mac.type == e1000_ich8lan) {
4820 		/* Set Tx and Rx buffer allocation to 8k apiece. */
4821 		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
4822 		/* Set Packet Buffer Size to 16k. */
4823 		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
4824 	}
4825 
4826 	if (hw->mac.type == e1000_pchlan) {
4827 		/* Save the NVM K1 bit setting */
4828 		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4829 		if (ret_val)
4830 			return ret_val;
4831 
4832 		if (kum_cfg & E1000_NVM_K1_ENABLE)
4833 			dev_spec->nvm_k1_enabled = TRUE;
4834 		else
4835 			dev_spec->nvm_k1_enabled = FALSE;
4836 	}
4837 
4838 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
4839 
4840 	if (!hw->phy.ops.check_reset_block(hw)) {
4841 		/* Full-chip reset requires MAC and PHY reset at the same
4842 		 * time to make sure the interface between MAC and the
4843 		 * external PHY is reset.
4844 		 */
4845 		ctrl |= E1000_CTRL_PHY_RST;
4846 
4847 		/* Gate automatic PHY configuration by hardware on
4848 		 * non-managed 82579
4849 		 */
4850 		if ((hw->mac.type == e1000_pch2lan) &&
4851 		    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
4852 			e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
4853 	}
4854 	ret_val = e1000_acquire_swflag_ich8lan(hw);
4855 	DEBUGOUT("Issuing a global reset to ich8lan\n");
4856 	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
4857 	/* cannot issue a flush here because it hangs the hardware */
4858 	msec_delay(20);
4859 
4860 	/* Set Phy Config Counter to 50msec */
4861 	if (hw->mac.type == e1000_pch2lan) {
4862 		reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
4863 		reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
4864 		reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
4865 		E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
4866 	}
4867 
4868 	if (ctrl & E1000_CTRL_PHY_RST) {
4869 		ret_val = hw->phy.ops.get_cfg_done(hw);
4870 		if (ret_val)
4871 			return ret_val;
4872 
4873 		ret_val = e1000_post_phy_reset_ich8lan(hw);
4874 		if (ret_val)
4875 			return ret_val;
4876 	}
4877 
4878 	/* For PCH, this write will make sure that any noise
4879 	 * will be detected as a CRC error and be dropped rather than show up
4880 	 * as a bad packet to the DMA engine.
4881 	 */
4882 	if (hw->mac.type == e1000_pchlan)
4883 		E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
4884 
4885 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4886 	E1000_READ_REG(hw, E1000_ICR);
4887 
4888 	reg = E1000_READ_REG(hw, E1000_KABGTXD);
4889 	reg |= E1000_KABGTXD_BGSQLBIAS;
4890 	E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
4891 
4892 	return E1000_SUCCESS;
4893 }
4894 
4895 /**
4896  *  e1000_init_hw_ich8lan - Initialize the hardware
4897  *  @hw: pointer to the HW structure
4898  *
4899  *  Prepares the hardware for transmit and receive by doing the following:
4900  *   - initialize hardware bits
4901  *   - initialize LED identification
4902  *   - setup receive address registers
4903  *   - setup flow control
4904  *   - setup transmit descriptors
4905  *   - clear statistics
4906  **/
4907 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
4908 {
4909 	struct e1000_mac_info *mac = &hw->mac;
4910 	u32 ctrl_ext, txdctl, snoop;
4911 	s32 ret_val;
4912 	u16 i;
4913 
4914 	DEBUGFUNC("e1000_init_hw_ich8lan");
4915 
4916 	e1000_initialize_hw_bits_ich8lan(hw);
4917 
4918 	/* Initialize identification LED */
4919 	ret_val = mac->ops.id_led_init(hw);
4920 	/* An error is not fatal and we should not stop init due to this */
4921 	if (ret_val)
4922 		DEBUGOUT("Error initializing identification LED\n");
4923 
4924 	/* Setup the receive address. */
4925 	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
4926 
4927 	/* Zero out the Multicast HASH table */
4928 	DEBUGOUT("Zeroing the MTA\n");
4929 	for (i = 0; i < mac->mta_reg_count; i++)
4930 		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
4931 
4932 	/* The 82578 Rx buffer will stall if wakeup is enabled in host and
4933 	 * the ME.  Disable wakeup by clearing the host wakeup bit.
4934 	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
4935 	 */
4936 	if (hw->phy.type == e1000_phy_82578) {
4937 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
4938 		i &= ~BM_WUC_HOST_WU_BIT;
4939 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
4940 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
4941 		if (ret_val)
4942 			return ret_val;
4943 	}
4944 
4945 	/* Setup link and flow control */
4946 	ret_val = mac->ops.setup_link(hw);
4947 
4948 	/* Set the transmit descriptor write-back policy for both queues */
4949 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
4950 	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4951 		  E1000_TXDCTL_FULL_TX_DESC_WB);
4952 	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4953 		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4954 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
4955 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
4956 	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4957 		  E1000_TXDCTL_FULL_TX_DESC_WB);
4958 	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4959 		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4960 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
4961 
4962 	/* ICH8 has opposite polarity of no_snoop bits.
4963 	 * By default, we should use snoop behavior.
4964 	 */
4965 	if (mac->type == e1000_ich8lan)
4966 		snoop = PCIE_ICH8_SNOOP_ALL;
4967 	else
4968 		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
4969 	e1000_set_pcie_no_snoop_generic(hw, snoop);
4970 
4971 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
4972 	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
4973 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
4974 
4975 	/* Clear all of the statistics registers (clear on read).  It is
4976 	 * important that we do this after we have tried to establish link
4977 	 * because the symbol error count will increment wildly if there
4978 	 * is no link.
4979 	 */
4980 	e1000_clear_hw_cntrs_ich8lan(hw);
4981 
4982 	return ret_val;
4983 }
4984 
4985 /**
4986  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
4987  *  @hw: pointer to the HW structure
4988  *
4989  *  Sets/Clears required hardware bits necessary for correctly setting up the
4990  *  hardware for transmit and receive.
4991  **/
4992 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
4993 {
4994 	u32 reg;
4995 
4996 	DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
4997 
4998 	/* Extended Device Control */
4999 	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
5000 	reg |= (1 << 22);
5001 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
5002 	if (hw->mac.type >= e1000_pchlan)
5003 		reg |= E1000_CTRL_EXT_PHYPDEN;
5004 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
5005 
5006 	/* Transmit Descriptor Control 0 */
5007 	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
5008 	reg |= (1 << 22);
5009 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
5010 
5011 	/* Transmit Descriptor Control 1 */
5012 	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
5013 	reg |= (1 << 22);
5014 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
5015 
5016 	/* Transmit Arbitration Control 0 */
5017 	reg = E1000_READ_REG(hw, E1000_TARC(0));
5018 	if (hw->mac.type == e1000_ich8lan)
5019 		reg |= (1 << 28) | (1 << 29);
5020 	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
5021 	E1000_WRITE_REG(hw, E1000_TARC(0), reg);
5022 
5023 	/* Transmit Arbitration Control 1 */
5024 	reg = E1000_READ_REG(hw, E1000_TARC(1));
5025 	if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
5026 		reg &= ~(1 << 28);
5027 	else
5028 		reg |= (1 << 28);
5029 	reg |= (1 << 24) | (1 << 26) | (1 << 30);
5030 	E1000_WRITE_REG(hw, E1000_TARC(1), reg);
5031 
5032 	/* Device Status */
5033 	if (hw->mac.type == e1000_ich8lan) {
5034 		reg = E1000_READ_REG(hw, E1000_STATUS);
5035 		reg &= ~(1U << 31);
5036 		E1000_WRITE_REG(hw, E1000_STATUS, reg);
5037 	}
5038 
5039 	/* Work around a descriptor data corruption issue seen with NFSv2
5040 	 * UDP traffic by disabling the NFS filtering capability.
5041 	 */
5042 	reg = E1000_READ_REG(hw, E1000_RFCTL);
5043 	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
5044 
5045 	/* Disable IPv6 extension header parsing because some malformed
5046 	 * IPv6 headers can hang the Rx.
5047 	 */
5048 	if (hw->mac.type == e1000_ich8lan)
5049 		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
5050 	E1000_WRITE_REG(hw, E1000_RFCTL, reg);
5051 
5052 	/* Enable ECC on Lynxpoint and Sunrise Point */
5053 	if (hw->mac.type == e1000_pch_lpt ||
5054 	    hw->mac.type == e1000_pch_spt) {
5055 		reg = E1000_READ_REG(hw, E1000_PBECCSTS);
5056 		reg |= E1000_PBECCSTS_ECC_ENABLE;
5057 		E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
5058 
5059 		reg = E1000_READ_REG(hw, E1000_CTRL);
5060 		reg |= E1000_CTRL_MEHE;
5061 		E1000_WRITE_REG(hw, E1000_CTRL, reg);
5062 	}
5063 
5064 	return;
5065 }
5066 
5067 /**
5068  *  e1000_setup_link_ich8lan - Setup flow control and link settings
5069  *  @hw: pointer to the HW structure
5070  *
5071  *  Determines which flow control settings to use, then configures flow
5072  *  control.  Calls the appropriate media-specific link configuration
5073  *  function.  Assuming the adapter has a valid link partner, a valid link
5074  *  should be established.  Assumes the hardware has previously been reset
5075  *  and the transmitter and receiver are not enabled.
5076  **/
5077 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
5078 {
5079 	s32 ret_val;
5080 
5081 	DEBUGFUNC("e1000_setup_link_ich8lan");
5082 
5083 	if (hw->phy.ops.check_reset_block(hw))
5084 		return E1000_SUCCESS;
5085 
5086 	/* ICH parts do not have a word in the NVM to determine
5087 	 * the default flow control setting, so we explicitly
5088 	 * set it to full.
5089 	 */
5090 	if (hw->fc.requested_mode == e1000_fc_default)
5091 		hw->fc.requested_mode = e1000_fc_full;
5092 
5093 	/* Save off the requested flow control mode for use later.  Depending
5094 	 * on the link partner's capabilities, we may or may not use this mode.
5095 	 */
5096 	hw->fc.current_mode = hw->fc.requested_mode;
5097 
5098 	DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
5099 		hw->fc.current_mode);
5100 
5101 	/* Continue to configure the copper link. */
5102 	ret_val = hw->mac.ops.setup_physical_interface(hw);
5103 	if (ret_val)
5104 		return ret_val;
5105 
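	/* Program the flow control transmit timer; PCH-family PHYs also get
	 * the refresh timer and the PHY's own copy of the pause time.
	 */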
5106 	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
5107 	if ((hw->phy.type == e1000_phy_82578) ||
5108 	    (hw->phy.type == e1000_phy_82579) ||
5109 	    (hw->phy.type == e1000_phy_i217) ||
5110 	    (hw->phy.type == e1000_phy_82577)) {
5111 		E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
5112 
5113 		ret_val = hw->phy.ops.write_reg(hw,
5114 					     PHY_REG(BM_PORT_CTRL_PAGE, 27),
5115 					     hw->fc.pause_time);
5116 		if (ret_val)
5117 			return ret_val;
5118 	}
5119 
5120 	return e1000_set_fc_watermarks_generic(hw);
5121 }
5122 
5123 /**
5124  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
5125  *  @hw: pointer to the HW structure
5126  *
5127  *  Configures the Kumeran interface to the PHY to wait the appropriate time
5128  *  when polling the PHY, then calls the generic setup_copper_link to finish
5129  *  configuring the copper link.
5130  **/
5131 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
5132 {
5133 	u32 ctrl;
5134 	s32 ret_val;
5135 	u16 reg_data;
5136 
5137 	DEBUGFUNC("e1000_setup_copper_link_ich8lan");
5138 
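	/* Force the link up (SLU) and clear the force-speed/force-duplex
	 * bits so speed and duplex are taken from auto-negotiation.
	 */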
5139 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5140 	ctrl |= E1000_CTRL_SLU;
5141 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5142 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5143 
5144 	/* Set the MAC to wait the maximum time between each iteration
5145 	 * and increase the maximum number of iterations when polling the
5146 	 * PHY; this fixes erroneous timeouts at 10Mbps.
5147 	 */
5148 	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
5149 					       0xFFFF);
5150 	if (ret_val)
5151 		return ret_val;
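	/* Set the lower six bits of the Kumeran in-band control parameter;
	 * the individual bit meanings are not documented in this driver.
	 */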
5152 	ret_val = e1000_read_kmrn_reg_generic(hw,
5153 					      E1000_KMRNCTRLSTA_INBAND_PARAM,
5154 					      &reg_data);
5155 	if (ret_val)
5156 		return ret_val;
5157 	reg_data |= 0x3F;
5158 	ret_val = e1000_write_kmrn_reg_generic(hw,
5159 					       E1000_KMRNCTRLSTA_INBAND_PARAM,
5160 					       reg_data);
5161 	if (ret_val)
5162 		return ret_val;
5163 
5164 	switch (hw->phy.type) {
5165 	case e1000_phy_igp_3:
5166 		ret_val = e1000_copper_link_setup_igp(hw);
5167 		if (ret_val)
5168 			return ret_val;
5169 		break;
5170 	case e1000_phy_bm:
5171 	case e1000_phy_82578:
5172 		ret_val = e1000_copper_link_setup_m88(hw);
5173 		if (ret_val)
5174 			return ret_val;
5175 		break;
5176 	case e1000_phy_82577:
5177 	case e1000_phy_82579:
5178 		ret_val = e1000_copper_link_setup_82577(hw);
5179 		if (ret_val)
5180 			return ret_val;
5181 		break;
5182 	case e1000_phy_ife:
5183 		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
5184 					       &reg_data);
5185 		if (ret_val)
5186 			return ret_val;
5187 
5188 		reg_data &= ~IFE_PMC_AUTO_MDIX;
5189 
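		/* hw->phy.mdix encodes the requested MDI state, following the
		 * usual e1000 convention: 0 = auto MDI/MDI-X, 1 = force MDI,
		 * 2 = force MDI-X.
		 */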
5190 		switch (hw->phy.mdix) {
5191 		case 1:
5192 			reg_data &= ~IFE_PMC_FORCE_MDIX;
5193 			break;
5194 		case 2:
5195 			reg_data |= IFE_PMC_FORCE_MDIX;
5196 			break;
5197 		case 0:
5198 		default:
5199 			reg_data |= IFE_PMC_AUTO_MDIX;
5200 			break;
5201 		}
5202 		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
5203 						reg_data);
5204 		if (ret_val)
5205 			return ret_val;
5206 		break;
5207 	default:
5208 		break;
5209 	}
5210 
5211 	return e1000_setup_copper_link_generic(hw);
5212 }
5213 
5214 /**
5215  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
5216  *  @hw: pointer to the HW structure
5217  *
5218  *  Calls the PHY specific link setup function and then calls the
5219  *  generic setup_copper_link to finish configuring the link for
5220  *  Lynxpoint PCH devices
5221  **/
5222 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
5223 {
5224 	u32 ctrl;
5225 	s32 ret_val;
5226 
5227 	DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
5228 
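	/* As in e1000_setup_copper_link_ich8lan(), force the link up and
	 * clear the forced speed/duplex bits.
	 */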
5229 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5230 	ctrl |= E1000_CTRL_SLU;
5231 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5232 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5233 
5234 	ret_val = e1000_copper_link_setup_82577(hw);
5235 	if (ret_val)
5236 		return ret_val;
5237 
5238 	return e1000_setup_copper_link_generic(hw);
5239 }
5240 
5241 /**
5242  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
5243  *  @hw: pointer to the HW structure
5244  *  @speed: pointer to store current link speed
5245  *  @duplex: pointer to store the current link duplex
5246  *
5247  *  Calls the generic get_speed_and_duplex to retrieve the current link
5248  *  information and then calls the Kumeran lock loss workaround for links at
5249  *  gigabit speeds.
5250  **/
5251 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
5252 					  u16 *duplex)
5253 {
5254 	s32 ret_val;
5255 
5256 	DEBUGFUNC("e1000_get_link_up_info_ich8lan");
5257 
5258 	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
5259 	if (ret_val)
5260 		return ret_val;
5261 
5262 	if ((hw->mac.type == e1000_ich8lan) &&
5263 	    (hw->phy.type == e1000_phy_igp_3) &&
5264 	    (*speed == SPEED_1000)) {
5265 		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
5266 	}
5267 
5268 	return ret_val;
5269 }
5270 
5271 /**
5272  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
5273  *  @hw: pointer to the HW structure
5274  *
5275  *  Work-around for 82566 Kumeran PCS lock loss:
5276  *  On a link status change (e.g. PCI reset, speed change) while link is up
5277  *  and the speed is gigabit:
5278  *    0) if the workaround has been disabled, do nothing
5279  *    1) wait 1ms for the Kumeran link to come up
5280  *    2) check the Kumeran Diagnostic register PCS lock loss bit
5281  *    3) if it is not set, the link is locked (all is good), otherwise...
5282  *    4) reset the PHY
5283  *    5) repeat up to 10 times
5284  *  Note: this is only called for IGP3 copper when the speed is 1Gbps.
5285  **/
5286 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
5287 {
5288 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5289 	u32 phy_ctrl;
5290 	s32 ret_val;
5291 	u16 i, data;
5292 	bool link;
5293 
5294 	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
5295 
5296 	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
5297 		return E1000_SUCCESS;
5298 
5299 	/* Make sure link is up before proceeding.  If not, just return.
5300 	 * Attempting this while the link is negotiating has been seen to
5301 	 * foul up link stability.
5302 	 */
5303 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
5304 	if (!link)
5305 		return E1000_SUCCESS;
5306 
5307 	for (i = 0; i < 10; i++) {
5308 		/* read once to clear */
5309 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5310 		if (ret_val)
5311 			return ret_val;
5312 		/* and again to get new status */
5313 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5314 		if (ret_val)
5315 			return ret_val;
5316 
5317 		/* check for PCS lock */
5318 		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
5319 			return E1000_SUCCESS;
5320 
5321 		/* Issue PHY reset */
5322 		hw->phy.ops.reset(hw);
5323 		msec_delay_irq(5);
5324 	}
5325 	/* Disable GigE link negotiation */
5326 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5327 	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
5328 		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5329 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5330 
5331 	/* Call gig speed drop workaround on Gig disable before accessing
5332 	 * any PHY registers
5333 	 */
5334 	e1000_gig_downshift_workaround_ich8lan(hw);
5335 
5336 	/* unable to acquire PCS lock */
5337 	return -E1000_ERR_PHY;
5338 }
5339 
5340 /**
5341  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
5342  *  @hw: pointer to the HW structure
5343  *  @state: boolean value used to set the current Kumeran workaround state
5344  *
5345  *  If ICH8, set the current Kumeran workaround state (enabled = TRUE,
5346  *  disabled = FALSE).
5347  **/
5348 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
5349 						 bool state)
5350 {
5351 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5352 
5353 	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
5354 
5355 	if (hw->mac.type != e1000_ich8lan) {
5356 		DEBUGOUT("Workaround applies to ICH8 only.\n");
5357 		return;
5358 	}
5359 
5360 	dev_spec->kmrn_lock_loss_workaround_enabled = state;
5361 
5362 	return;
5363 }
5364 
5365 /**
5366  *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
5367  *  @hw: pointer to the HW structure
5368  *
5369  *  Workaround for 82566 power-down on D3 entry:
5370  *    1) disable gigabit link
5371  *    2) write VR power-down enable
5372  *    3) read it back
5373  *  Continue if successful; otherwise issue an LCD reset and repeat at most once.
5374  **/
5375 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
5376 {
5377 	u32 reg;
5378 	u16 data;
5379 	u8  retry = 0;
5380 
5381 	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
5382 
5383 	if (hw->phy.type != e1000_phy_igp_3)
5384 		return;
5385 
5386 	/* Try the workaround twice (if needed) */
5387 	do {
5388 		/* Disable link */
5389 		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
5390 		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
5391 			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5392 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
5393 
5394 		/* Call gig speed drop workaround on Gig disable before
5395 		 * accessing any PHY registers
5396 		 */
5397 		if (hw->mac.type == e1000_ich8lan)
5398 			e1000_gig_downshift_workaround_ich8lan(hw);
5399 
5400 		/* Write VR power-down enable */
5401 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5402 		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5403 		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
5404 				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);
5405 
5406 		/* Read it back and test */
5407 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5408 		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5409 		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
5410 			break;
5411 
5412 		/* Issue PHY reset and repeat at most one more time */
5413 		reg = E1000_READ_REG(hw, E1000_CTRL);
5414 		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
5415 		retry++;
5416 	} while (retry);
5417 }
5418 
5419 /**
5420  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
5421  *  @hw: pointer to the HW structure
5422  *
5423  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
5424  *  LPLU, Gig disable, MDIC PHY reset):
5425  *    1) Set Kumeran Near-end loopback
5426  *    2) Clear Kumeran Near-end loopback
5427  *  Should only be called for ICH8[M] devices with any 1G PHY.
5428  **/
5429 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
5430 {
5431 	s32 ret_val;
5432 	u16 reg_data;
5433 
5434 	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
5435 
5436 	if ((hw->mac.type != e1000_ich8lan) ||
5437 	    (hw->phy.type == e1000_phy_ife))
5438 		return;
5439 
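	/* Toggle the Kumeran near-end loopback bit: set it, then clear it
	 * again, per the steps described above.
	 */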
5440 	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5441 					      &reg_data);
5442 	if (ret_val)
5443 		return;
5444 	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
5445 	ret_val = e1000_write_kmrn_reg_generic(hw,
5446 					       E1000_KMRNCTRLSTA_DIAG_OFFSET,
5447 					       reg_data);
5448 	if (ret_val)
5449 		return;
5450 	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
5451 	e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5452 				     reg_data);
5453 }
5454 
5455 /**
5456  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
5457  *  @hw: pointer to the HW structure
5458  *
5459  *  During S0 to Sx transition, it is possible the link remains at gig
5460  *  instead of negotiating to a lower speed.  Before going to Sx, set
5461  *  'Gig Disable' to force link speed negotiation to a lower speed based on
5462  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
5463  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
5464  *  needs to be written.
5465  *  Parts that support (and are linked to a partner which supports) EEE in
5466  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
5467  *  than 10Mbps w/o EEE.
5468  **/
5469 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
5470 {
5471 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5472 	u32 phy_ctrl;
5473 	s32 ret_val;
5474 
5475 	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
5476 
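	/* Set 'Gig Disable' in the cached PHY control value; it is written
	 * back to the register near the end of this function.
	 */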
5477 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5478 	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
5479 
5480 	if (hw->phy.type == e1000_phy_i217) {
5481 		u16 phy_reg, device_id = hw->device_id;
5482 
5483 		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
5484 		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
5485 		    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
5486 		    (device_id == E1000_DEV_ID_PCH_I218_V3) ||
5487 		    (hw->mac.type == e1000_pch_spt)) {
5488 			u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
5489 
5490 			E1000_WRITE_REG(hw, E1000_FEXTNVM6,
5491 					fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
5492 		}
5493 
5494 		ret_val = hw->phy.ops.acquire(hw);
5495 		if (ret_val)
5496 			goto out;
5497 
5498 		if (!dev_spec->eee_disable) {
5499 			u16 eee_advert;
5500 
5501 			ret_val =
5502 			    e1000_read_emi_reg_locked(hw,
5503 						      I217_EEE_ADVERTISEMENT,
5504 						      &eee_advert);
5505 			if (ret_val)
5506 				goto release;
5507 
5508 			/* Disable LPLU if both link partners support 100BaseT
5509 			 * EEE and 100Full is advertised on both ends of the
5510 			 * link, and enable Auto Enable LPI since there will
5511 			 * be no driver to enable LPI while in Sx.
5512 			 */
5513 			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
5514 			    (dev_spec->eee_lp_ability &
5515 			     I82579_EEE_100_SUPPORTED) &&
5516 			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
5517 				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
5518 					      E1000_PHY_CTRL_NOND0A_LPLU);
5519 
5520 				/* Set Auto Enable LPI after link up */
5521 				hw->phy.ops.read_reg_locked(hw,
5522 							    I217_LPI_GPIO_CTRL,
5523 							    &phy_reg);
5524 				phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5525 				hw->phy.ops.write_reg_locked(hw,
5526 							     I217_LPI_GPIO_CTRL,
5527 							     phy_reg);
5528 			}
5529 		}
5530 
5531 		/* For i217 Intel Rapid Start Technology support,
5532 		 * when the system is going into Sx and no manageability engine
5533 		 * is present, the driver must configure proxy to reset only on
5534 		 * power good.  LPI (Low Power Idle) state must also reset only
5535 		 * on power good, as well as the MTA (Multicast table array).
5536 		 * The SMBus release must also be disabled on LCD reset.
5537 		 */
5538 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
5539 		      E1000_ICH_FWSM_FW_VALID)) {
5540 			/* Enable proxy to reset only on power good. */
5541 			hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
5542 						    &phy_reg);
5543 			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
5544 			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
5545 						     phy_reg);
5546 
5547 			/* Set the LPI (EEE) enable bit to reset only on
5548 			 * power good.
5549 			 */
5550 			hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
5551 			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
5552 			hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
5553 
5554 			/* Disable the SMB release on LCD reset. */
5555 			hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
5556 			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
5557 			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5558 		}
5559 
5560 		/* Enable MTA to reset for Intel Rapid Start Technology
5561 		 * Support
5562 		 */
5563 		hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
5564 		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
5565 		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5566 
5567 release:
5568 		hw->phy.ops.release(hw);
5569 	}
5570 out:
5571 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5572 
5573 	if (hw->mac.type == e1000_ich8lan)
5574 		e1000_gig_downshift_workaround_ich8lan(hw);
5575 
5576 	if (hw->mac.type >= e1000_pchlan) {
5577 		e1000_oem_bits_config_ich8lan(hw, FALSE);
5578 
5579 		/* Reset PHY to activate OEM bits on 82577/8 */
5580 		if (hw->mac.type == e1000_pchlan)
5581 			e1000_phy_hw_reset_generic(hw);
5582 
5583 		ret_val = hw->phy.ops.acquire(hw);
5584 		if (ret_val)
5585 			return;
5586 		e1000_write_smbus_addr(hw);
5587 		hw->phy.ops.release(hw);
5588 	}
5589 
5590 	return;
5591 }
5592 
5593 /**
5594  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
5595  *  @hw: pointer to the HW structure
5596  *
5597  *  During Sx to S0 transitions on non-managed devices or managed devices
5598  *  on which PHY resets are not blocked, if the PHY registers cannot be
5599  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
5600  *  the PHY.
5601  *  On i217, setup Intel Rapid Start Technology.
5602  **/
5603 void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
5604 {
5605 	s32 ret_val;
5606 
5607 	DEBUGFUNC("e1000_resume_workarounds_pchlan");
5608 
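	/* These resume workarounds apply to 82579 (PCH2) and newer parts
	 * only.
	 */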
5609 	if (hw->mac.type < e1000_pch2lan)
5610 		return;
5611 
5612 	ret_val = e1000_init_phy_workarounds_pchlan(hw);
5613 	if (ret_val) {
5614 		DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
5615 		return;
5616 	}
5617 
5618 	/* For i217 Intel Rapid Start Technology support, when the system
5619 	 * is transitioning from Sx and no manageability engine is present,
5620 	 * configure SMBus to restore on reset, disable proxy, and enable
5621 	 * the reset on MTA (Multicast table array).
5622 	 */
5623 	if (hw->phy.type == e1000_phy_i217) {
5624 		u16 phy_reg;
5625 
5626 		ret_val = hw->phy.ops.acquire(hw);
5627 		if (ret_val) {
5628 			DEBUGOUT("Failed to setup iRST\n");
5629 			return;
5630 		}
5631 
5632 		/* Clear Auto Enable LPI after link up */
5633 		hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
5634 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5635 		hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
5636 
5637 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
5638 		    E1000_ICH_FWSM_FW_VALID)) {
5639 			/* Restore clear on SMB if no manageability engine
5640 			 * is present
5641 			 */
5642 			ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
5643 							      &phy_reg);
5644 			if (ret_val)
5645 				goto release;
5646 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5647 			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5648 
5649 			/* Disable Proxy */
5650 			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
5651 		}
5652 		/* Enable reset on MTA */
5653 		ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
5654 						      &phy_reg);
5655 		if (ret_val)
5656 			goto release;
5657 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5658 		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5659 release:
5660 		if (ret_val)
5661 			DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
5662 		hw->phy.ops.release(hw);
5663 	}
5664 }
5665 
5666 /**
5667  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5668  *  @hw: pointer to the HW structure
5669  *
5670  *  Return the LED back to the default configuration.
5671  **/
5672 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5673 {
5674 	DEBUGFUNC("e1000_cleanup_led_ich8lan");
5675 
5676 	if (hw->phy.type == e1000_phy_ife)
5677 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5678 					     0);
5679 
5680 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5681 	return E1000_SUCCESS;
5682 }
5683 
5684 /**
5685  *  e1000_led_on_ich8lan - Turn LEDs on
5686  *  @hw: pointer to the HW structure
5687  *
5688  *  Turn on the LEDs.
5689  **/
5690 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5691 {
5692 	DEBUGFUNC("e1000_led_on_ich8lan");
5693 
5694 	if (hw->phy.type == e1000_phy_ife)
5695 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5696 				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5697 
5698 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5699 	return E1000_SUCCESS;
5700 }
5701 
5702 /**
5703  *  e1000_led_off_ich8lan - Turn LEDs off
5704  *  @hw: pointer to the HW structure
5705  *
5706  *  Turn off the LEDs.
5707  **/
5708 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5709 {
5710 	DEBUGFUNC("e1000_led_off_ich8lan");
5711 
5712 	if (hw->phy.type == e1000_phy_ife)
5713 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5714 			       (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5715 
5716 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5717 	return E1000_SUCCESS;
5718 }
5719 
5720 /**
5721  *  e1000_setup_led_pchlan - Configures SW controllable LED
5722  *  @hw: pointer to the HW structure
5723  *
5724  *  This prepares the SW controllable LED for use.
5725  **/
5726 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5727 {
5728 	DEBUGFUNC("e1000_setup_led_pchlan");
5729 
5730 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5731 				     (u16)hw->mac.ledctl_mode1);
5732 }
5733 
5734 /**
5735  *  e1000_cleanup_led_pchlan - Restore the default LED operation
5736  *  @hw: pointer to the HW structure
5737  *
5738  *  Return the LED back to the default configuration.
5739  **/
5740 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5741 {
5742 	DEBUGFUNC("e1000_cleanup_led_pchlan");
5743 
5744 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5745 				     (u16)hw->mac.ledctl_default);
5746 }
5747 
5748 /**
5749  *  e1000_led_on_pchlan - Turn LEDs on
5750  *  @hw: pointer to the HW structure
5751  *
5752  *  Turn on the LEDs.
5753  **/
5754 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5755 {
5756 	u16 data = (u16)hw->mac.ledctl_mode2;
5757 	u32 i, led;
5758 
5759 	DEBUGFUNC("e1000_led_on_pchlan");
5760 
5761 	/* If no link, then turn LED on by setting the invert bit
5762 	 * for each LED whose mode is "link_up" in ledctl_mode2.
5763 	 */
5764 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
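		/* Each of the three LEDs is configured in its own 5-bit
		 * field of HV_LED_CONFIG, hence the (i * 5) shifts below.
		 */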
5765 		for (i = 0; i < 3; i++) {
5766 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5767 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
5768 			    E1000_LEDCTL_MODE_LINK_UP)
5769 				continue;
5770 			if (led & E1000_PHY_LED0_IVRT)
5771 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5772 			else
5773 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
5774 		}
5775 	}
5776 
5777 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5778 }
5779 
5780 /**
5781  *  e1000_led_off_pchlan - Turn LEDs off
5782  *  @hw: pointer to the HW structure
5783  *
5784  *  Turn off the LEDs.
5785  **/
5786 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5787 {
5788 	u16 data = (u16)hw->mac.ledctl_mode1;
5789 	u32 i, led;
5790 
5791 	DEBUGFUNC("e1000_led_off_pchlan");
5792 
5793 	/* If no link, then turn LED off by clearing the invert bit
5794 	 * for each LED whose mode is "link_up" in ledctl_mode1.
5795 	 */
5796 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5797 		for (i = 0; i < 3; i++) {
5798 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5799 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
5800 			    E1000_LEDCTL_MODE_LINK_UP)
5801 				continue;
5802 			if (led & E1000_PHY_LED0_IVRT)
5803 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5804 			else
5805 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
5806 		}
5807 	}
5808 
5809 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5810 }
5811 
5812 /**
5813  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5814  *  @hw: pointer to the HW structure
5815  *
5816  *  Read appropriate register for the config done bit for completion status
5817  *  and configure the PHY through s/w for EEPROM-less parts.
5818  *
5819  *  NOTE: some EEPROM-less silicon will fail trying to read the config done
5820  *  bit, so we only log an error and continue.  If we were to return an
5821  *  error, EEPROM-less silicon would not be able to be reset or change
5822  *  link.
5823  **/
5824 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5825 {
5826 	s32 ret_val = E1000_SUCCESS;
5827 	u32 bank = 0;
5828 	u32 status;
5829 
5830 	DEBUGFUNC("e1000_get_cfg_done_ich8lan");
5831 
5832 	e1000_get_cfg_done_generic(hw);
5833 
5834 	/* Wait for indication from h/w that it has completed basic config */
5835 	if (hw->mac.type >= e1000_ich10lan) {
5836 		e1000_lan_init_done_ich8lan(hw);
5837 	} else {
5838 		ret_val = e1000_get_auto_rd_done_generic(hw);
5839 		if (ret_val) {
5840 			/* When the auto config read does not complete, do not
5841 			 * return an error.  This can happen when there is no
5842 			 * EEPROM, and an error here would prevent getting link.
5843 			 */
5844 			DEBUGOUT("Auto Read Done did not complete\n");
5845 			ret_val = E1000_SUCCESS;
5846 		}
5847 	}
5848 
5849 	/* Clear PHY Reset Asserted bit */
5850 	status = E1000_READ_REG(hw, E1000_STATUS);
5851 	if (status & E1000_STATUS_PHYRA)
5852 		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
5853 	else
5854 		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
5855 
5856 	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
5857 	if (hw->mac.type <= e1000_ich9lan) {
5858 		if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
5859 		    (hw->phy.type == e1000_phy_igp_3)) {
5860 			e1000_phy_init_script_igp3(hw);
5861 		}
5862 	} else {
5863 		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
5864 			/* Maybe we should do a basic PHY config */
5865 			DEBUGOUT("EEPROM not present\n");
5866 			ret_val = -E1000_ERR_CONFIG;
5867 		}
5868 	}
5869 
5870 	return ret_val;
5871 }
5872 
5873 /**
5874  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
5875  * @hw: pointer to the HW structure
5876  *
5877  * In the case of a PHY power-down to save power, to turn off link during a
5878  * driver unload, or when wake-on-LAN is not enabled, remove the link.
5879  **/
5880 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
5881 {
5882 	/* Power down unless management mode or a PHY reset block is active */
5883 	if (!(hw->mac.ops.check_mng_mode(hw) ||
5884 	      hw->phy.ops.check_reset_block(hw)))
5885 		e1000_power_down_phy_copper(hw);
5886 
5887 	return;
5888 }
5889 
5890 /**
5891  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
5892  *  @hw: pointer to the HW structure
5893  *
5894  *  Clears hardware counters specific to the silicon family and calls
5895  *  clear_hw_cntrs_generic to clear all general purpose counters.
5896  **/
5897 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
5898 {
5899 	u16 phy_data;
5900 	s32 ret_val;
5901 
5902 	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
5903 
5904 	e1000_clear_hw_cntrs_base_generic(hw);
5905 
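	/* Read (and thereby clear) the clear-on-read statistics registers
	 * specific to this family.
	 */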
5906 	E1000_READ_REG(hw, E1000_ALGNERRC);
5907 	E1000_READ_REG(hw, E1000_RXERRC);
5908 	E1000_READ_REG(hw, E1000_TNCRS);
5909 	E1000_READ_REG(hw, E1000_CEXTERR);
5910 	E1000_READ_REG(hw, E1000_TSCTC);
5911 	E1000_READ_REG(hw, E1000_TSCTFC);
5912 
5913 	E1000_READ_REG(hw, E1000_MGTPRC);
5914 	E1000_READ_REG(hw, E1000_MGTPDC);
5915 	E1000_READ_REG(hw, E1000_MGTPTC);
5916 
5917 	E1000_READ_REG(hw, E1000_IAC);
5918 	E1000_READ_REG(hw, E1000_ICRXOC);
5919 
5920 	/* Clear PHY statistics registers */
5921 	if ((hw->phy.type == e1000_phy_82578) ||
5922 	    (hw->phy.type == e1000_phy_82579) ||
5923 	    (hw->phy.type == e1000_phy_i217) ||
5924 	    (hw->phy.type == e1000_phy_82577)) {
5925 		ret_val = hw->phy.ops.acquire(hw);
5926 		if (ret_val)
5927 			return;
5928 		ret_val = hw->phy.ops.set_page(hw,
5929 					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
5930 		if (ret_val)
5931 			goto release;
5932 		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
5933 		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
5934 		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
5935 		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
5936 		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
5937 		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
5938 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
5939 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
5940 		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
5941 		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
5942 		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
5943 		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
5944 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
5945 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
5946 release:
5947 		hw->phy.ops.release(hw);
5948 	}
5949 }
5950 
5951