xref: /dragonfly/sys/dev/netif/ig_hal/e1000_ich8lan.c (revision 3851e4b8)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2015, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 /* 82562G 10/100 Network Connection
36  * 82562G-2 10/100 Network Connection
37  * 82562GT 10/100 Network Connection
38  * 82562GT-2 10/100 Network Connection
39  * 82562V 10/100 Network Connection
40  * 82562V-2 10/100 Network Connection
41  * 82566DC-2 Gigabit Network Connection
42  * 82566DC Gigabit Network Connection
43  * 82566DM-2 Gigabit Network Connection
44  * 82566DM Gigabit Network Connection
45  * 82566MC Gigabit Network Connection
46  * 82566MM Gigabit Network Connection
47  * 82567LM Gigabit Network Connection
48  * 82567LF Gigabit Network Connection
49  * 82567V Gigabit Network Connection
50  * 82567LM-2 Gigabit Network Connection
51  * 82567LF-2 Gigabit Network Connection
52  * 82567V-2 Gigabit Network Connection
53  * 82567LF-3 Gigabit Network Connection
54  * 82567LM-3 Gigabit Network Connection
55  * 82567LM-4 Gigabit Network Connection
56  * 82577LM Gigabit Network Connection
57  * 82577LC Gigabit Network Connection
58  * 82578DM Gigabit Network Connection
59  * 82578DC Gigabit Network Connection
60  * 82579LM Gigabit Network Connection
61  * 82579V Gigabit Network Connection
62  * Ethernet Connection I217-LM
63  * Ethernet Connection I217-V
64  * Ethernet Connection I218-V
65  * Ethernet Connection I218-LM
66  * Ethernet Connection (2) I218-LM
67  * Ethernet Connection (2) I218-V
68  * Ethernet Connection (3) I218-LM
69  * Ethernet Connection (3) I218-V
70  */
71 
72 #include "e1000_api.h"
73 
74 static s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
75 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
76 static s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
77 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
78 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
79 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
80 static int  e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
81 static int  e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
82 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
83 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
84 					      u8 *mc_addr_list,
85 					      u32 mc_addr_count);
86 static s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
87 static s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
88 static s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
89 static s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
90 					    bool active);
91 static s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
92 					    bool active);
93 static s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
94 				   u16 words, u16 *data);
95 static s32  e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
96 			       u16 *data);
97 static s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
98 				    u16 words, u16 *data);
99 static s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
100 static s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
101 static s32  e1000_update_nvm_checksum_spt(struct e1000_hw *hw);
102 static s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
103 					    u16 *data);
104 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
105 static s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
106 static s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
107 static s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
108 static s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
109 static s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
110 static s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
111 static s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
112 					   u16 *speed, u16 *duplex);
113 static s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
114 static s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
115 static s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
116 static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
117 static s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
118 static s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
119 static s32  e1000_led_on_pchlan(struct e1000_hw *hw);
120 static s32  e1000_led_off_pchlan(struct e1000_hw *hw);
121 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
122 static s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
123 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
124 static s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
125 static s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
126 					  u32 offset, u8 *data);
127 static s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
128 					  u8 size, u16 *data);
129 static s32  e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
130 					    u32 *data);
131 static s32  e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
132 					   u32 offset, u32 *data);
133 static s32  e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
134 					     u32 offset, u32 data);
135 static s32  e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
136 						  u32 offset, u32 dword);
137 static s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
138 					  u32 offset, u16 *data);
139 static s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
140 						 u32 offset, u8 byte);
141 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
142 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
143 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
144 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
145 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
146 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
147 static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr);
148 
149 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
150 /* Offset 04h HSFSTS */
151 union ich8_hws_flash_status {
152 	struct ich8_hsfsts {
153 		u16 flcdone:1; /* bit 0 Flash Cycle Done */
154 		u16 flcerr:1; /* bit 1 Flash Cycle Error */
155 		u16 dael:1; /* bit 2 Direct Access error Log */
156 		u16 berasesz:2; /* bit 4:3 Sector Erase Size */
157 		u16 flcinprog:1; /* bit 5 flash cycle in Progress */
158 		u16 reserved1:2; /* bit 13:6 Reserved */
159 		u16 reserved2:6; /* bit 13:6 Reserved */
160 		u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
161 		u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
162 	} hsf_status;
163 	u16 regval;
164 };
165 
166 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
167 /* Offset 06h FLCTL */
168 union ich8_hws_flash_ctrl {
169 	struct ich8_hsflctl {
170 		u16 flcgo:1;   /* 0 Flash Cycle Go */
171 		u16 flcycle:2;   /* 2:1 Flash Cycle */
172 		u16 reserved:5;   /* 7:3 Reserved  */
173 		u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
174 		u16 flockdn:6;   /* 15:10 Reserved */
175 	} hsf_ctrl;
176 	u16 regval;
177 };
178 
179 /* ICH Flash Region Access Permissions */
180 union ich8_hws_flash_regacc {
181 	struct ich8_flracc {
182 		u32 grra:8; /* 0:7 GbE region Read Access */
183 		u32 grwa:8; /* 8:15 GbE region Write Access */
184 		u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
185 		u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
186 	} hsf_flregacc;
187 	u16 regval;
188 };
189 
190 /**
191  *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
192  *  @hw: pointer to the HW structure
193  *
194  *  Test access to the PHY registers by reading the PHY ID registers.  If
195  *  the PHY ID is already known (e.g. resume path) compare it with known ID,
196  *  otherwise assume the read PHY ID is correct if it is valid.
197  *
198  *  Assumes the sw/fw/hw semaphore is already acquired.
199  **/
200 static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
201 {
202 	u16 phy_reg = 0;
203 	u32 phy_id = 0;
204 	s32 ret_val = 0;
205 	u16 retry_count;
206 	u32 mac_reg = 0;
207 
208 	for (retry_count = 0; retry_count < 2; retry_count++) {
209 		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
210 		if (ret_val || (phy_reg == 0xFFFF))
211 			continue;
212 		phy_id = (u32)(phy_reg << 16);
213 
214 		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
215 		if (ret_val || (phy_reg == 0xFFFF)) {
216 			phy_id = 0;
217 			continue;
218 		}
219 		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
220 		break;
221 	}
222 
223 	if (hw->phy.id) {
224 		if  (hw->phy.id == phy_id)
225 			goto out;
226 	} else if (phy_id) {
227 		hw->phy.id = phy_id;
228 		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
229 		goto out;
230 	}
231 
232 	/* In case the PHY needs to be in mdio slow mode,
233 	 * set slow mode and try to get the PHY id again.
234 	 */
235 	if (hw->mac.type < e1000_pch_lpt) {
236 		hw->phy.ops.release(hw);
237 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
238 		if (!ret_val)
239 			ret_val = e1000_get_phy_id(hw);
240 		hw->phy.ops.acquire(hw);
241 	}
242 
243 	if (ret_val)
244 		return FALSE;
245 out:
246 	if ((hw->mac.type == e1000_pch_lpt) ||
247 	    (hw->mac.type == e1000_pch_spt)) {
248 		/* Only unforce SMBus if ME is not active */
249 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
250 		    E1000_ICH_FWSM_FW_VALID)) {
251 			/* Unforce SMBus mode in PHY */
252 			hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
253 			phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
254 			hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
255 
256 			/* Unforce SMBus mode in MAC */
257 			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
258 			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
259 			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
260 		}
261 	}
262 
263 	return TRUE;
264 }
265 
266 /**
267  *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
268  *  @hw: pointer to the HW structure
269  *
270  *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
271  *  used to reset the PHY to a quiescent state when necessary.
272  **/
273 static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
274 {
275 	u32 mac_reg;
276 
277 	DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");
278 
279 	/* Set Phy Config Counter to 50msec */
280 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
281 	mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
282 	mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
283 	E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
284 
285 	/* Toggle LANPHYPC Value bit */
286 	mac_reg = E1000_READ_REG(hw, E1000_CTRL);
287 	mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
288 	mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
289 	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
290 	E1000_WRITE_FLUSH(hw);
291 	usec_delay(10);
292 	mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
293 	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
294 	E1000_WRITE_FLUSH(hw);
295 
296 	if (hw->mac.type < e1000_pch_lpt) {
297 		msec_delay(50);
298 	} else {
299 		u16 count = 20;
300 
301 		do {
302 			msec_delay(5);
303 		} while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
304 			   E1000_CTRL_EXT_LPCD) && count--);
305 
306 		msec_delay(30);
307 	}
308 }
309 
310 /**
311  *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
312  *  @hw: pointer to the HW structure
313  *
314  *  Workarounds/flow necessary for PHY initialization during driver load
315  *  and resume paths.
316  **/
317 static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
318 {
319 	u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
320 	s32 ret_val;
321 
322 	DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
323 
324 	/* Gate automatic PHY configuration by hardware on managed and
325 	 * non-managed 82579 and newer adapters.
326 	 */
327 	e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
328 
329 	/* It is not possible to be certain of the current state of ULP
330 	 * so forcibly disable it.
331 	 */
332 	hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
333 	e1000_disable_ulp_lpt_lp(hw, TRUE);
334 
335 	ret_val = hw->phy.ops.acquire(hw);
336 	if (ret_val) {
337 		DEBUGOUT("Failed to initialize PHY flow\n");
338 		goto out;
339 	}
340 
341 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
342 	 * inaccessible and resetting the PHY is not blocked, toggle the
343 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
344 	 */
345 	switch (hw->mac.type) {
346 	case e1000_pch_lpt:
347 	case e1000_pch_spt:
348 		if (e1000_phy_is_accessible_pchlan(hw))
349 			break;
350 
351 		/* Before toggling LANPHYPC, see if PHY is accessible by
352 		 * forcing MAC to SMBus mode first.
353 		 */
354 		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
355 		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
356 		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
357 
358 		/* Wait 50 milliseconds for MAC to finish any retries
359 		 * that it might be trying to perform from previous
360 		 * attempts to acknowledge any phy read requests.
361 		 */
362 		 msec_delay(50);
363 
364 		/* fall-through */
365 	case e1000_pch2lan:
366 		if (e1000_phy_is_accessible_pchlan(hw))
367 			break;
368 
369 		/* fall-through */
370 	case e1000_pchlan:
371 		if ((hw->mac.type == e1000_pchlan) &&
372 		    (fwsm & E1000_ICH_FWSM_FW_VALID))
373 			break;
374 
375 		if (hw->phy.ops.check_reset_block(hw)) {
376 			DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
377 			ret_val = -E1000_ERR_PHY;
378 			break;
379 		}
380 
381 		/* Toggle LANPHYPC Value bit */
382 		e1000_toggle_lanphypc_pch_lpt(hw);
383 		if (hw->mac.type >= e1000_pch_lpt) {
384 			if (e1000_phy_is_accessible_pchlan(hw))
385 				break;
386 
387 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
388 			 * so ensure that the MAC is also out of SMBus mode
389 			 */
390 			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
391 			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
392 			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
393 
394 			if (e1000_phy_is_accessible_pchlan(hw))
395 				break;
396 
397 			ret_val = -E1000_ERR_PHY;
398 		}
399 		break;
400 	default:
401 		break;
402 	}
403 
404 	hw->phy.ops.release(hw);
405 	if (!ret_val) {
406 
407 		/* Check to see if able to reset PHY.  Print error if not */
408 		if (hw->phy.ops.check_reset_block(hw)) {
409 			ERROR_REPORT("Reset blocked by ME\n");
410 			goto out;
411 		}
412 
413 		/* Reset the PHY before any access to it.  Doing so, ensures
414 		 * that the PHY is in a known good state before we read/write
415 		 * PHY registers.  The generic reset is sufficient here,
416 		 * because we haven't determined the PHY type yet.
417 		 */
418 		ret_val = e1000_phy_hw_reset_generic(hw);
419 		if (ret_val)
420 			goto out;
421 
422 		/* On a successful reset, possibly need to wait for the PHY
423 		 * to quiesce to an accessible state before returning control
424 		 * to the calling function.  If the PHY does not quiesce, then
425 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
426 		 *  the PHY is in.
427 		 */
428 		ret_val = hw->phy.ops.check_reset_block(hw);
429 		if (ret_val)
430 			ERROR_REPORT("ME blocked access to PHY after reset\n");
431 	}
432 
433 out:
434 	/* Ungate automatic PHY configuration on non-managed 82579 */
435 	if ((hw->mac.type == e1000_pch2lan) &&
436 	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
437 		msec_delay(10);
438 		e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
439 	}
440 
441 	return ret_val;
442 }
443 
444 /**
445  *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
446  *  @hw: pointer to the HW structure
447  *
448  *  Initialize family-specific PHY parameters and function pointers.
449  **/
450 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
451 {
452 	struct e1000_phy_info *phy = &hw->phy;
453 	s32 ret_val;
454 
455 	DEBUGFUNC("e1000_init_phy_params_pchlan");
456 
457 	phy->addr		= 1;
458 	phy->reset_delay_us	= 100;
459 
460 	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
461 	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
462 	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
463 	phy->ops.set_page	= e1000_set_page_igp;
464 	phy->ops.read_reg	= e1000_read_phy_reg_hv;
465 	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
466 	phy->ops.read_reg_page	= e1000_read_phy_reg_page_hv;
467 	phy->ops.release	= e1000_release_swflag_ich8lan;
468 	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
469 	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
470 	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
471 	phy->ops.write_reg	= e1000_write_phy_reg_hv;
472 	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
473 	phy->ops.write_reg_page	= e1000_write_phy_reg_page_hv;
474 	phy->ops.power_up	= e1000_power_up_phy_copper;
475 	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;
476 	phy->autoneg_mask	= AUTONEG_ADVERTISE_SPEED_DEFAULT;
477 
478 	phy->id = e1000_phy_unknown;
479 
480 	ret_val = e1000_init_phy_workarounds_pchlan(hw);
481 	if (ret_val)
482 		return ret_val;
483 
484 	if (phy->id == e1000_phy_unknown)
485 		switch (hw->mac.type) {
486 		default:
487 			ret_val = e1000_get_phy_id(hw);
488 			if (ret_val)
489 				return ret_val;
490 			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
491 				break;
492 			/* fall-through */
493 		case e1000_pch2lan:
494 		case e1000_pch_lpt:
495 		case e1000_pch_spt:
496 			/* In case the PHY needs to be in mdio slow mode,
497 			 * set slow mode and try to get the PHY id again.
498 			 */
499 			ret_val = e1000_set_mdio_slow_mode_hv(hw);
500 			if (ret_val)
501 				return ret_val;
502 			ret_val = e1000_get_phy_id(hw);
503 			if (ret_val)
504 				return ret_val;
505 			break;
506 		}
507 	phy->type = e1000_get_phy_type_from_id(phy->id);
508 
509 	switch (phy->type) {
510 	case e1000_phy_82577:
511 	case e1000_phy_82579:
512 	case e1000_phy_i217:
513 		phy->ops.check_polarity = e1000_check_polarity_82577;
514 		phy->ops.force_speed_duplex =
515 			e1000_phy_force_speed_duplex_82577;
516 		phy->ops.get_cable_length = e1000_get_cable_length_82577;
517 		phy->ops.get_info = e1000_get_phy_info_82577;
518 		phy->ops.commit = e1000_phy_sw_reset_generic;
519 		break;
520 	case e1000_phy_82578:
521 		phy->ops.check_polarity = e1000_check_polarity_m88;
522 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
523 		phy->ops.get_cable_length = e1000_get_cable_length_m88;
524 		phy->ops.get_info = e1000_get_phy_info_m88;
525 		break;
526 	default:
527 		ret_val = -E1000_ERR_PHY;
528 		break;
529 	}
530 
531 	return ret_val;
532 }
533 
534 /**
535  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
536  *  @hw: pointer to the HW structure
537  *
538  *  Initialize family-specific PHY parameters and function pointers.
539  **/
540 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
541 {
542 	struct e1000_phy_info *phy = &hw->phy;
543 	s32 ret_val;
544 	u16 i = 0;
545 
546 	DEBUGFUNC("e1000_init_phy_params_ich8lan");
547 
548 	phy->addr		= 1;
549 	phy->reset_delay_us	= 100;
550 
551 	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
552 	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
553 	phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
554 	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
555 	phy->ops.read_reg	= e1000_read_phy_reg_igp;
556 	phy->ops.release	= e1000_release_swflag_ich8lan;
557 	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
558 	phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
559 	phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
560 	phy->ops.write_reg	= e1000_write_phy_reg_igp;
561 	phy->ops.power_up	= e1000_power_up_phy_copper;
562 	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;
563 
564 	/* We may need to do this twice - once for IGP and if that fails,
565 	 * we'll set BM func pointers and try again
566 	 */
567 	ret_val = e1000_determine_phy_address(hw);
568 	if (ret_val) {
569 		phy->ops.write_reg = e1000_write_phy_reg_bm;
570 		phy->ops.read_reg  = e1000_read_phy_reg_bm;
571 		ret_val = e1000_determine_phy_address(hw);
572 		if (ret_val) {
573 			DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
574 			return ret_val;
575 		}
576 	}
577 
578 	phy->id = 0;
579 	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
580 	       (i++ < 100)) {
581 		msec_delay(1);
582 		ret_val = e1000_get_phy_id(hw);
583 		if (ret_val)
584 			return ret_val;
585 	}
586 
587 	/* Verify phy id */
588 	switch (phy->id) {
589 	case IGP03E1000_E_PHY_ID:
590 		phy->type = e1000_phy_igp_3;
591 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
592 		phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
593 		phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
594 		phy->ops.get_info = e1000_get_phy_info_igp;
595 		phy->ops.check_polarity = e1000_check_polarity_igp;
596 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
597 		break;
598 	case IFE_E_PHY_ID:
599 	case IFE_PLUS_E_PHY_ID:
600 	case IFE_C_E_PHY_ID:
601 		phy->type = e1000_phy_ife;
602 		phy->autoneg_mask = E1000_ALL_NOT_GIG;
603 		phy->ops.get_info = e1000_get_phy_info_ife;
604 		phy->ops.check_polarity = e1000_check_polarity_ife;
605 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
606 		break;
607 	case BME1000_E_PHY_ID:
608 		phy->type = e1000_phy_bm;
609 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
610 		phy->ops.read_reg = e1000_read_phy_reg_bm;
611 		phy->ops.write_reg = e1000_write_phy_reg_bm;
612 		phy->ops.commit = e1000_phy_sw_reset_generic;
613 		phy->ops.get_info = e1000_get_phy_info_m88;
614 		phy->ops.check_polarity = e1000_check_polarity_m88;
615 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
616 		break;
617 	default:
618 		return -E1000_ERR_PHY;
619 		break;
620 	}
621 
622 	return E1000_SUCCESS;
623 }
624 
625 /**
626  *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
627  *  @hw: pointer to the HW structure
628  *
629  *  Initialize family-specific NVM parameters and function
630  *  pointers.
631  **/
632 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
633 {
634 	struct e1000_nvm_info *nvm = &hw->nvm;
635 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
636 	u32 gfpreg, sector_base_addr, sector_end_addr;
637 	u16 i;
638 	u32 nvm_size;
639 
640 	DEBUGFUNC("e1000_init_nvm_params_ich8lan");
641 
642 	nvm->type = e1000_nvm_flash_sw;
643 
644 	if (hw->mac.type == e1000_pch_spt) {
645 		/* in SPT, gfpreg doesn't exist. NVM size is taken from the
646 		 * STRAP register. This is because in SPT the GbE Flash region
647 		 * is no longer accessed through the flash registers. Instead,
648 		 * the mechanism has changed, and the Flash region access
649 		 * registers are now implemented in GbE memory space.
650 		 */
651 		nvm->flash_base_addr = 0;
652 		nvm_size =
653 		    (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
654 		    * NVM_SIZE_MULTIPLIER;
655 		nvm->flash_bank_size = nvm_size / 2;
656 		/* Adjust to word count */
657 		nvm->flash_bank_size /= sizeof(u16);
658 		/* Set the base address for flash register access */
659 		hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
660 	} else {
661 		/* Can't read flash registers if register set isn't mapped. */
662 		if (!hw->flash_address) {
663 			DEBUGOUT("ERROR: Flash registers not mapped\n");
664 			return -E1000_ERR_CONFIG;
665 		}
666 
667 		gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
668 
669 		/* sector_X_addr is a "sector"-aligned address (4096 bytes)
670 		 * Add 1 to sector_end_addr since this sector is included in
671 		 * the overall size.
672 		 */
673 		sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
674 		sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
675 
676 		/* flash_base_addr is byte-aligned */
677 		nvm->flash_base_addr = sector_base_addr
678 				       << FLASH_SECTOR_ADDR_SHIFT;
679 
680 		/* find total size of the NVM, then cut in half since the total
681 		 * size represents two separate NVM banks.
682 		 */
683 		nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
684 					<< FLASH_SECTOR_ADDR_SHIFT);
685 		nvm->flash_bank_size /= 2;
686 		/* Adjust to word count */
687 		nvm->flash_bank_size /= sizeof(u16);
688 	}
689 
690 	nvm->word_size = E1000_SHADOW_RAM_WORDS;
691 
692 	/* Clear shadow ram */
693 	for (i = 0; i < nvm->word_size; i++) {
694 		dev_spec->shadow_ram[i].modified = FALSE;
695 		dev_spec->shadow_ram[i].value    = 0xFFFF;
696 	}
697 
698 	/* Function Pointers */
699 	nvm->ops.acquire	= e1000_acquire_nvm_ich8lan;
700 	nvm->ops.release	= e1000_release_nvm_ich8lan;
701 	if (hw->mac.type == e1000_pch_spt) {
702 		nvm->ops.read	= e1000_read_nvm_spt;
703 		nvm->ops.update	= e1000_update_nvm_checksum_spt;
704 	} else {
705 		nvm->ops.read	= e1000_read_nvm_ich8lan;
706 		nvm->ops.update	= e1000_update_nvm_checksum_ich8lan;
707 	}
708 	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
709 	nvm->ops.validate	= e1000_validate_nvm_checksum_ich8lan;
710 	nvm->ops.write		= e1000_write_nvm_ich8lan;
711 
712 	return E1000_SUCCESS;
713 }
714 
715 /**
716  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
717  *  @hw: pointer to the HW structure
718  *
719  *  Initialize family-specific MAC parameters and function
720  *  pointers.
721  **/
722 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
723 {
724 	struct e1000_mac_info *mac = &hw->mac;
725 
726 	DEBUGFUNC("e1000_init_mac_params_ich8lan");
727 
728 	/* Set media type function pointer */
729 	hw->phy.media_type = e1000_media_type_copper;
730 
731 	/* Set mta register count */
732 	mac->mta_reg_count = 32;
733 	/* Set rar entry count */
734 	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
735 	if (mac->type == e1000_ich8lan)
736 		mac->rar_entry_count--;
737 	/* Set if part includes ASF firmware */
738 	mac->asf_firmware_present = TRUE;
739 	/* FWSM register */
740 	mac->has_fwsm = TRUE;
741 	/* ARC subsystem not supported */
742 	mac->arc_subsystem_valid = FALSE;
743 	/* Adaptive IFS supported */
744 	mac->adaptive_ifs = TRUE;
745 
746 	/* Function pointers */
747 
748 	/* bus type/speed/width */
749 	mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
750 	/* function id */
751 	mac->ops.set_lan_id = e1000_set_lan_id_single_port;
752 	/* reset */
753 	mac->ops.reset_hw = e1000_reset_hw_ich8lan;
754 	/* hw initialization */
755 	mac->ops.init_hw = e1000_init_hw_ich8lan;
756 	/* link setup */
757 	mac->ops.setup_link = e1000_setup_link_ich8lan;
758 	/* physical interface setup */
759 	mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
760 	/* check for link */
761 	mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
762 	/* link info */
763 	mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
764 	/* multicast address update */
765 	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
766 	/* clear hardware counters */
767 	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
768 
769 	/* LED and other operations */
770 	switch (mac->type) {
771 	case e1000_ich8lan:
772 	case e1000_ich9lan:
773 	case e1000_ich10lan:
774 		/* check management mode */
775 		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
776 		/* ID LED init */
777 		mac->ops.id_led_init = e1000_id_led_init_generic;
778 		/* blink LED */
779 		mac->ops.blink_led = e1000_blink_led_generic;
780 		/* setup LED */
781 		mac->ops.setup_led = e1000_setup_led_generic;
782 		/* cleanup LED */
783 		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
784 		/* turn on/off LED */
785 		mac->ops.led_on = e1000_led_on_ich8lan;
786 		mac->ops.led_off = e1000_led_off_ich8lan;
787 		break;
788 	case e1000_pch2lan:
789 		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
790 		mac->ops.rar_set = e1000_rar_set_pch2lan;
791 		/* fall-through */
792 	case e1000_pch_lpt:
793 	case e1000_pch_spt:
794 		/* multicast address update for pch2 */
795 		mac->ops.update_mc_addr_list =
796 			e1000_update_mc_addr_list_pch2lan;
797 		/* fall-through */
798 	case e1000_pchlan:
799 		/* check management mode */
800 		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
801 		/* ID LED init */
802 		mac->ops.id_led_init = e1000_id_led_init_pchlan;
803 		/* setup LED */
804 		mac->ops.setup_led = e1000_setup_led_pchlan;
805 		/* cleanup LED */
806 		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
807 		/* turn on/off LED */
808 		mac->ops.led_on = e1000_led_on_pchlan;
809 		mac->ops.led_off = e1000_led_off_pchlan;
810 		break;
811 	default:
812 		break;
813 	}
814 
815 	if ((mac->type == e1000_pch_lpt) ||
816 	    (mac->type == e1000_pch_spt)) {
817 		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
818 		mac->ops.rar_set = e1000_rar_set_pch_lpt;
819 		mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
820 		mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt;
821 	}
822 
823 	/* Enable PCS Lock-loss workaround for ICH8 */
824 	if (mac->type == e1000_ich8lan)
825 		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
826 
827 	return E1000_SUCCESS;
828 }
829 
830 /**
831  *  __e1000_access_emi_reg_locked - Read/write EMI register
832  *  @hw: pointer to the HW structure
833  *  @addr: EMI address to program
834  *  @data: pointer to value to read/write from/to the EMI address
835  *  @read: boolean flag to indicate read or write
836  *
837  *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
838  **/
839 static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
840 					 u16 *data, bool read)
841 {
842 	s32 ret_val;
843 
844 	DEBUGFUNC("__e1000_access_emi_reg_locked");
845 
846 	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
847 	if (ret_val)
848 		return ret_val;
849 
850 	if (read)
851 		ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
852 						      data);
853 	else
854 		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
855 						       *data);
856 
857 	return ret_val;
858 }
859 
860 /**
861  *  e1000_read_emi_reg_locked - Read Extended Management Interface register
862  *  @hw: pointer to the HW structure
863  *  @addr: EMI address to program
864  *  @data: value to be read from the EMI address
865  *
866  *  Assumes the SW/FW/HW Semaphore is already acquired.
867  **/
868 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
869 {
870 	DEBUGFUNC("e1000_read_emi_reg_locked");
871 
872 	return __e1000_access_emi_reg_locked(hw, addr, data, TRUE);
873 }
874 
875 /**
876  *  e1000_write_emi_reg_locked - Write Extended Management Interface register
877  *  @hw: pointer to the HW structure
878  *  @addr: EMI address to program
879  *  @data: value to be written to the EMI address
880  *
881  *  Assumes the SW/FW/HW Semaphore is already acquired.
882  **/
883 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
884 {
885 	DEBUGFUNC("e1000_read_emi_reg_locked");
886 
887 	return __e1000_access_emi_reg_locked(hw, addr, &data, FALSE);
888 }
889 
890 /**
891  *  e1000_set_eee_pchlan - Enable/disable EEE support
892  *  @hw: pointer to the HW structure
893  *
894  *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
895  *  the link and the EEE capabilities of the link partner.  The LPI Control
896  *  register bits will remain set only if/when link is up.
897  *
898  *  EEE LPI must not be asserted earlier than one second after link is up.
899  *  On 82579, EEE LPI should not be enabled until such time otherwise there
900  *  can be link issues with some switches.  Other devices can have EEE LPI
901  *  enabled immediately upon link up since they have a timer in hardware which
902  *  prevents LPI from being asserted too early.
903  **/
904 s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
905 {
906 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
907 	s32 ret_val;
908 	u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
909 
910 	DEBUGFUNC("e1000_set_eee_pchlan");
911 
912 	switch (hw->phy.type) {
913 	case e1000_phy_82579:
914 		lpa = I82579_EEE_LP_ABILITY;
915 		pcs_status = I82579_EEE_PCS_STATUS;
916 		adv_addr = I82579_EEE_ADVERTISEMENT;
917 		break;
918 	case e1000_phy_i217:
919 		lpa = I217_EEE_LP_ABILITY;
920 		pcs_status = I217_EEE_PCS_STATUS;
921 		adv_addr = I217_EEE_ADVERTISEMENT;
922 		break;
923 	default:
924 		return E1000_SUCCESS;
925 	}
926 
927 	ret_val = hw->phy.ops.acquire(hw);
928 	if (ret_val)
929 		return ret_val;
930 
931 	ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
932 	if (ret_val)
933 		goto release;
934 
935 	/* Clear bits that enable EEE in various speeds */
936 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
937 
938 	/* Enable EEE if not disabled by user */
939 	if (!dev_spec->eee_disable) {
940 		/* Save off link partner's EEE ability */
941 		ret_val = e1000_read_emi_reg_locked(hw, lpa,
942 						    &dev_spec->eee_lp_ability);
943 		if (ret_val)
944 			goto release;
945 
946 		/* Read EEE advertisement */
947 		ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
948 		if (ret_val)
949 			goto release;
950 
951 		/* Enable EEE only for speeds in which the link partner is
952 		 * EEE capable and for which we advertise EEE.
953 		 */
954 		if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
955 			lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
956 
957 		if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
958 			hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
959 			if (data & NWAY_LPAR_100TX_FD_CAPS)
960 				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
961 			else
962 				/* EEE is not supported in 100Half, so ignore
963 				 * partner's EEE in 100 ability if full-duplex
964 				 * is not advertised.
965 				 */
966 				dev_spec->eee_lp_ability &=
967 				    ~I82579_EEE_100_SUPPORTED;
968 		}
969 	}
970 
971 	if (hw->phy.type == e1000_phy_82579) {
972 		ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
973 						    &data);
974 		if (ret_val)
975 			goto release;
976 
977 		data &= ~I82579_LPI_100_PLL_SHUT;
978 		ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
979 						     data);
980 	}
981 
982 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
983 	ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
984 	if (ret_val)
985 		goto release;
986 
987 	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
988 release:
989 	hw->phy.ops.release(hw);
990 
991 	return ret_val;
992 }
993 
994 /**
995  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
996  *  @hw:   pointer to the HW structure
997  *  @link: link up bool flag
998  *
999  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
1000  *  preventing further DMA write requests.  Workaround the issue by disabling
1001  *  the de-assertion of the clock request when in 1Gpbs mode.
1002  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
1003  *  speeds in order to avoid Tx hangs.
1004  **/
1005 static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
1006 {
1007 	u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1008 	u32 status = E1000_READ_REG(hw, E1000_STATUS);
1009 	s32 ret_val = E1000_SUCCESS;
1010 	u16 reg;
1011 
1012 	if (link && (status & E1000_STATUS_SPEED_1000)) {
1013 		ret_val = hw->phy.ops.acquire(hw);
1014 		if (ret_val)
1015 			return ret_val;
1016 
1017 		ret_val =
1018 		    e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1019 					       &reg);
1020 		if (ret_val)
1021 			goto release;
1022 
1023 		ret_val =
1024 		    e1000_write_kmrn_reg_locked(hw,
1025 						E1000_KMRNCTRLSTA_K1_CONFIG,
1026 						reg &
1027 						~E1000_KMRNCTRLSTA_K1_ENABLE);
1028 		if (ret_val)
1029 			goto release;
1030 
1031 		usec_delay(10);
1032 
1033 		E1000_WRITE_REG(hw, E1000_FEXTNVM6,
1034 				fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
1035 
1036 		ret_val =
1037 		    e1000_write_kmrn_reg_locked(hw,
1038 						E1000_KMRNCTRLSTA_K1_CONFIG,
1039 						reg);
1040 release:
1041 		hw->phy.ops.release(hw);
1042 	} else {
1043 		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
1044 		fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
1045 
1046 		if ((hw->phy.revision > 5) || !link ||
1047 		    ((status & E1000_STATUS_SPEED_100) &&
1048 		     (status & E1000_STATUS_FD)))
1049 			goto update_fextnvm6;
1050 
1051 		ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
1052 		if (ret_val)
1053 			return ret_val;
1054 
1055 		/* Clear link status transmit timeout */
1056 		reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
1057 
1058 		if (status & E1000_STATUS_SPEED_100) {
1059 			/* Set inband Tx timeout to 5x10us for 100Half */
1060 			reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1061 
1062 			/* Do not extend the K1 entry latency for 100Half */
1063 			fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1064 		} else {
1065 			/* Set inband Tx timeout to 50x10us for 10Full/Half */
1066 			reg |= 50 <<
1067 			       I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1068 
1069 			/* Extend the K1 entry latency for 10 Mbps */
1070 			fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1071 		}
1072 
1073 		ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
1074 		if (ret_val)
1075 			return ret_val;
1076 
1077 update_fextnvm6:
1078 		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1079 	}
1080 
1081 	return ret_val;
1082 }
1083 
1084 static u64 e1000_ltr2ns(u16 ltr)
1085 {
1086 	u32 value, scale;
1087 
1088 	/* Determine the latency in nsec based on the LTR value & scale */
1089 	value = ltr & E1000_LTRV_VALUE_MASK;
1090 	scale = (ltr & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT;
1091 
1092 	return value * (1 << (scale * E1000_LTRV_SCALE_FACTOR));
1093 }
1094 
1095 /**
1096  *  e1000_platform_pm_pch_lpt - Set platform power management values
1097  *  @hw: pointer to the HW structure
1098  *  @link: bool indicating link status
1099  *
1100  *  Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
1101  *  GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
1102  *  when link is up (which must not exceed the maximum latency supported
1103  *  by the platform), otherwise specify there is no LTR requirement.
1104  *  Unlike TRUE-PCIe devices which set the LTR maximum snoop/no-snoop
1105  *  latencies in the LTR Extended Capability Structure in the PCIe Extended
1106  *  Capability register set, on this device LTR is set by writing the
1107  *  equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
1108  *  set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
1109  *  message to the PMC.
1110  *
1111  *  Use the LTR value to calculate the Optimized Buffer Flush/Fill (OBFF)
1112  *  high-water mark.
1113  **/
1114 static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
1115 {
1116 	u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
1117 		  link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
1118 	u16 lat_enc = 0;	/* latency encoded */
1119 	s32 obff_hwm = 0;
1120 
1121 	DEBUGFUNC("e1000_platform_pm_pch_lpt");
1122 
1123 	if (link) {
1124 		u16 speed, duplex, scale = 0;
1125 		u16 max_snoop, max_nosnoop;
1126 		u16 max_ltr_enc;	/* max LTR latency encoded */
1127 		s64 lat_ns;
1128 		s64 value;
1129 		u32 rxa;
1130 
1131 		if (!hw->mac.max_frame_size) {
1132 			DEBUGOUT("max_frame_size not set.\n");
1133 			return -E1000_ERR_CONFIG;
1134 		}
1135 
1136 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
1137 		if (!speed) {
1138 			DEBUGOUT("Speed not set.\n");
1139 			return -E1000_ERR_CONFIG;
1140 		}
1141 
1142 		/* Rx Packet Buffer Allocation size (KB) */
1143 		rxa = E1000_READ_REG(hw, E1000_PBA) & E1000_PBA_RXA_MASK;
1144 
1145 		/* Determine the maximum latency tolerated by the device.
1146 		 *
1147 		 * Per the PCIe spec, the tolerated latencies are encoded as
1148 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
1149 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
1150 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
1151 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
1152 		 */
1153 		lat_ns = ((s64)rxa * 1024 -
1154 			  (2 * (s64)hw->mac.max_frame_size)) * 8 * 1000;
1155 		if (lat_ns < 0)
1156 			lat_ns = 0;
1157 		else
1158 			lat_ns /= speed;
1159 		value = lat_ns;
1160 
1161 		while (value > E1000_LTRV_VALUE_MASK) {
1162 			scale++;
1163 			value = E1000_DIVIDE_ROUND_UP(value, (1 << 5));
1164 		}
1165 		if (scale > E1000_LTRV_SCALE_MAX) {
1166 			DEBUGOUT1("Invalid LTR latency scale %d\n", scale);
1167 			return -E1000_ERR_CONFIG;
1168 		}
1169 		lat_enc = (u16)((scale << E1000_LTRV_SCALE_SHIFT) | value);
1170 
1171 		/* Determine the maximum latency tolerated by the platform */
1172 		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT, &max_snoop);
1173 		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
1174 		max_ltr_enc = E1000_MAX(max_snoop, max_nosnoop);
1175 
1176 		if (lat_enc > max_ltr_enc) {
1177 			lat_enc = max_ltr_enc;
1178 			lat_ns = e1000_ltr2ns(max_ltr_enc);
1179 		}
1180 
1181 		if (lat_ns) {
1182 			lat_ns *= speed * 1000;
1183 			lat_ns /= 8;
1184 			lat_ns /= 1000000000;
1185 			obff_hwm = (s32)(rxa - lat_ns);
1186 		}
1187 		if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) {
1188 			DEBUGOUT1("Invalid high water mark %d\n", obff_hwm);
1189 			return -E1000_ERR_CONFIG;
1190 		}
1191 	}
1192 
1193 	/* Set Snoop and No-Snoop latencies the same */
1194 	reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
1195 	E1000_WRITE_REG(hw, E1000_LTRV, reg);
1196 
1197 	/* Set OBFF high water mark */
1198 	reg = E1000_READ_REG(hw, E1000_SVT) & ~E1000_SVT_OFF_HWM_MASK;
1199 	reg |= obff_hwm;
1200 	E1000_WRITE_REG(hw, E1000_SVT, reg);
1201 
1202 	/* Enable OBFF */
1203 	reg = E1000_READ_REG(hw, E1000_SVCR);
1204 	reg |= E1000_SVCR_OFF_EN;
1205 	/* Always unblock interrupts to the CPU even when the system is
1206 	 * in OBFF mode. This ensures that small round-robin traffic
1207 	 * (like ping) does not get dropped or experience long latency.
1208 	 */
1209 	reg |= E1000_SVCR_OFF_MASKINT;
1210 	E1000_WRITE_REG(hw, E1000_SVCR, reg);
1211 
1212 	return E1000_SUCCESS;
1213 }
1214 
1215 /**
1216  *  e1000_set_obff_timer_pch_lpt - Update Optimized Buffer Flush/Fill timer
1217  *  @hw: pointer to the HW structure
1218  *  @itr: interrupt throttling rate
1219  *
1220  *  Configure OBFF with the updated interrupt rate.
1221  **/
1222 static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr)
1223 {
1224 	u32 svcr;
1225 	s32 timer;
1226 
1227 	DEBUGFUNC("e1000_set_obff_timer_pch_lpt");
1228 
1229 	/* Convert ITR value into microseconds for OBFF timer */
1230 	timer = itr & E1000_ITR_MASK;
1231 	timer = (timer * E1000_ITR_MULT) / 1000;
1232 
1233 	if ((timer < 0) || (timer > E1000_ITR_MASK)) {
1234 		DEBUGOUT1("Invalid OBFF timer %d\n", timer);
1235 		return -E1000_ERR_CONFIG;
1236 	}
1237 
1238 	svcr = E1000_READ_REG(hw, E1000_SVCR);
1239 	svcr &= ~E1000_SVCR_OFF_TIMER_MASK;
1240 	svcr |= timer << E1000_SVCR_OFF_TIMER_SHIFT;
1241 	E1000_WRITE_REG(hw, E1000_SVCR, svcr);
1242 
1243 	return E1000_SUCCESS;
1244 }
1245 
1246 /**
1247  *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1248  *  @hw: pointer to the HW structure
1249  *  @to_sx: boolean indicating a system power state transition to Sx
1250  *
1251  *  When link is down, configure ULP mode to significantly reduce the power
1252  *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
1253  *  ME firmware to start the ULP configuration.  If not on an ME enabled
1254  *  system, configure the ULP mode by software.
1255  */
1256 s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1257 {
1258 	u32 mac_reg;
1259 	s32 ret_val = E1000_SUCCESS;
1260 	u16 phy_reg;
1261 	u16 oem_reg = 0;
1262 
1263 	if ((hw->mac.type < e1000_pch_lpt) ||
1264 	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1265 	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1266 	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1267 	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1268 	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1269 		return 0;
1270 
1271 	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1272 		/* Request ME configure ULP mode in the PHY */
1273 		mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1274 		mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1275 		E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1276 
1277 		goto out;
1278 	}
1279 
1280 	if (!to_sx) {
1281 		int i = 0;
1282 
1283 		/* Poll up to 5 seconds for Cable Disconnected indication */
1284 		while (!(E1000_READ_REG(hw, E1000_FEXT) &
1285 			 E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1286 			/* Bail if link is re-acquired */
1287 			if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
1288 				return -E1000_ERR_PHY;
1289 
1290 			if (i++ == 100)
1291 				break;
1292 
1293 			msec_delay(50);
1294 		}
1295 		DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
1296 			 (E1000_READ_REG(hw, E1000_FEXT) &
1297 			  E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
1298 			 i * 50);
1299 	}
1300 
1301 	ret_val = hw->phy.ops.acquire(hw);
1302 	if (ret_val)
1303 		goto out;
1304 
1305 	/* Force SMBus mode in PHY */
1306 	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1307 	if (ret_val)
1308 		goto release;
1309 	phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1310 	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1311 
1312 	/* Force SMBus mode in MAC */
1313 	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1314 	mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1315 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1316 
1317 	/* Si workaround for ULP entry flow on i127/rev6 h/w.  Enable
1318 	 * LPLU and disable Gig speed when entering ULP
1319 	 */
1320 	if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
1321 		ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
1322 						       &oem_reg);
1323 		if (ret_val)
1324 			goto release;
1325 
1326 		phy_reg = oem_reg;
1327 		phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
1328 
1329 		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1330 							phy_reg);
1331 
1332 		if (ret_val)
1333 			goto release;
1334 	}
1335 
1336 	/* Set Inband ULP Exit, Reset to SMBus mode and
1337 	 * Disable SMBus Release on PERST# in PHY
1338 	 */
1339 	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1340 	if (ret_val)
1341 		goto release;
1342 	phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1343 		    I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1344 	if (to_sx) {
1345 		if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
1346 			phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1347 		else
1348 			phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1349 
1350 		phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1351 		phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
1352 	} else {
1353 		phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1354 		phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
1355 		phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1356 	}
1357 	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1358 
1359 	/* Set Disable SMBus Release on PERST# in MAC */
1360 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1361 	mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1362 	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1363 
1364 	/* Commit ULP changes in PHY by starting auto ULP configuration */
1365 	phy_reg |= I218_ULP_CONFIG1_START;
1366 	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1367 
1368 	if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
1369 	    to_sx && (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
1370 		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1371 							oem_reg);
1372 		if (ret_val)
1373 			goto release;
1374 	}
1375 
1376 release:
1377 	hw->phy.ops.release(hw);
1378 out:
1379 	if (ret_val)
1380 		DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
1381 	else
1382 		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1383 
1384 	return ret_val;
1385 }
1386 
1387 /**
1388  *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1389  *  @hw: pointer to the HW structure
1390  *  @force: boolean indicating whether or not to force disabling ULP
1391  *
1392  *  Un-configure ULP mode when link is up, the system is transitioned from
1393  *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1394  *  system, poll for an indication from ME that ULP has been un-configured.
1395  *  If not on an ME enabled system, un-configure the ULP mode by software.
1396  *
1397  *  During nominal operation, this function is called when link is acquired
1398  *  to disable ULP mode (force=FALSE); otherwise, for example when unloading
1399  *  the driver or during Sx->S0 transitions, this is called with force=TRUE
1400  *  to forcibly disable ULP.
1401  */
1402 s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1403 {
1404 	s32 ret_val = E1000_SUCCESS;
1405 	u32 mac_reg;
1406 	u16 phy_reg;
1407 	int i = 0;
1408 
1409 	if ((hw->mac.type < e1000_pch_lpt) ||
1410 	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1411 	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1412 	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1413 	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1414 	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1415 		return 0;
1416 
1417 	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1418 		if (force) {
1419 			/* Request ME un-configure ULP mode in the PHY */
1420 			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1421 			mac_reg &= ~E1000_H2ME_ULP;
1422 			mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1423 			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1424 		}
1425 
1426 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
1427 		while (E1000_READ_REG(hw, E1000_FWSM) &
1428 		       E1000_FWSM_ULP_CFG_DONE) {
1429 			if (i++ == 30) {
1430 				ret_val = -E1000_ERR_PHY;
1431 				goto out;
1432 			}
1433 
1434 			msec_delay(10);
1435 		}
1436 		DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1437 
1438 		if (force) {
1439 			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1440 			mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1441 			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1442 		} else {
1443 			/* Clear H2ME.ULP after ME ULP configuration */
1444 			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1445 			mac_reg &= ~E1000_H2ME_ULP;
1446 			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1447 		}
1448 
1449 		goto out;
1450 	}
1451 
1452 	ret_val = hw->phy.ops.acquire(hw);
1453 	if (ret_val)
1454 		goto out;
1455 
1456 	if (force)
1457 		/* Toggle LANPHYPC Value bit */
1458 		e1000_toggle_lanphypc_pch_lpt(hw);
1459 
1460 	/* Unforce SMBus mode in PHY */
1461 	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1462 	if (ret_val) {
1463 		/* The MAC might be in PCIe mode, so temporarily force to
1464 		 * SMBus mode in order to access the PHY.
1465 		 */
1466 		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1467 		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1468 		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1469 
1470 		msec_delay(50);
1471 
1472 		ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1473 						       &phy_reg);
1474 		if (ret_val)
1475 			goto release;
1476 	}
1477 	phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1478 	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1479 
1480 	/* Unforce SMBus mode in MAC */
1481 	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1482 	mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1483 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1484 
1485 	/* When ULP mode was previously entered, K1 was disabled by the
1486 	 * hardware.  Re-Enable K1 in the PHY when exiting ULP.
1487 	 */
1488 	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1489 	if (ret_val)
1490 		goto release;
1491 	phy_reg |= HV_PM_CTRL_K1_ENABLE;
1492 	e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1493 
1494 	/* Clear ULP enabled configuration */
1495 	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1496 	if (ret_val)
1497 		goto release;
1498 	phy_reg &= ~(I218_ULP_CONFIG1_IND |
1499 		     I218_ULP_CONFIG1_STICKY_ULP |
1500 		     I218_ULP_CONFIG1_RESET_TO_SMBUS |
1501 		     I218_ULP_CONFIG1_WOL_HOST |
1502 		     I218_ULP_CONFIG1_INBAND_EXIT |
1503 		     I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
1504 		     I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
1505 		     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1506 	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1507 
1508 	/* Commit ULP changes by starting auto ULP configuration */
1509 	phy_reg |= I218_ULP_CONFIG1_START;
1510 	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1511 
1512 	/* Clear Disable SMBus Release on PERST# in MAC */
1513 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1514 	mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1515 	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1516 
1517 release:
1518 	hw->phy.ops.release(hw);
1519 	if (force) {
1520 		hw->phy.ops.reset(hw);
1521 		msec_delay(50);
1522 	}
1523 out:
1524 	if (ret_val)
1525 		DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
1526 	else
1527 		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1528 
1529 	return ret_val;
1530 }
1531 
1532 /**
1533  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1534  *  @hw: pointer to the HW structure
1535  *
1536  *  Checks to see of the link status of the hardware has changed.  If a
1537  *  change in link status has been detected, then we read the PHY registers
1538  *  to get the current speed/duplex if link exists.
1539  **/
1540 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1541 {
1542 	struct e1000_mac_info *mac = &hw->mac;
1543 	s32 ret_val, tipg_reg = 0;
1544 	u16 emi_addr, emi_val = 0;
1545 	bool link;
1546 	u16 phy_reg;
1547 
1548 	DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1549 
1550 	/* We only want to go out to the PHY registers to see if Auto-Neg
1551 	 * has completed and/or if our link status has changed.  The
1552 	 * get_link_status flag is set upon receiving a Link Status
1553 	 * Change or Rx Sequence Error interrupt.
1554 	 */
1555 	if (!mac->get_link_status)
1556 		return E1000_SUCCESS;
1557 
1558 	/* First we want to see if the MII Status Register reports
1559 	 * link.  If so, then we want to get the current speed/duplex
1560 	 * of the PHY.
1561 	 */
1562 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1563 	if (ret_val)
1564 		return ret_val;
1565 
1566 	if (hw->mac.type == e1000_pchlan) {
1567 		ret_val = e1000_k1_gig_workaround_hv(hw, link);
1568 		if (ret_val)
1569 			return ret_val;
1570 	}
1571 
1572 	/* When connected at 10Mbps half-duplex, some parts are excessively
1573 	 * aggressive resulting in many collisions. To avoid this, increase
1574 	 * the IPG and reduce Rx latency in the PHY.
1575 	 */
1576 	if (((hw->mac.type == e1000_pch2lan) ||
1577 	     (hw->mac.type == e1000_pch_lpt) ||
1578 	     (hw->mac.type == e1000_pch_spt)) && link) {
1579 		u16 speed, duplex;
1580 
1581 		e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
1582 		tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
1583 		tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1584 
1585 		if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1586 			tipg_reg |= 0xFF;
1587 			/* Reduce Rx latency in analog PHY */
1588 			emi_val = 0;
1589 		} else if (hw->mac.type == e1000_pch_spt &&
1590 			   duplex == FULL_DUPLEX && speed != SPEED_1000) {
1591 			tipg_reg |= 0xC;
1592 			emi_val = 1;
1593 		} else {
1594 			/* Roll back the default values */
1595 			tipg_reg |= 0x08;
1596 			emi_val = 1;
1597 		}
1598 
1599 		E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);
1600 
1601 		ret_val = hw->phy.ops.acquire(hw);
1602 		if (ret_val)
1603 			return ret_val;
1604 
1605 		if (hw->mac.type == e1000_pch2lan)
1606 			emi_addr = I82579_RX_CONFIG;
1607 		else
1608 			emi_addr = I217_RX_CONFIG;
1609 		ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1610 
1611 		if (hw->mac.type == e1000_pch_lpt ||
1612 		    hw->mac.type == e1000_pch_spt) {
1613 			u16 phy_reg;
1614 
1615 			hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG,
1616 						    &phy_reg);
1617 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
1618 			if (speed == SPEED_100 || speed == SPEED_10)
1619 				phy_reg |= 0x3E8;
1620 			else
1621 				phy_reg |= 0xFA;
1622 			hw->phy.ops.write_reg_locked(hw,
1623 						     I217_PLL_CLOCK_GATE_REG,
1624 						     phy_reg);
1625 		}
1626 		hw->phy.ops.release(hw);
1627 
1628 		if (ret_val)
1629 			return ret_val;
1630 
1631 		if (hw->mac.type == e1000_pch_spt) {
1632 			u16 data;
1633 			u16 ptr_gap;
1634 
1635 			if (speed == SPEED_1000) {
1636 				ret_val = hw->phy.ops.acquire(hw);
1637 				if (ret_val)
1638 					return ret_val;
1639 
1640 				ret_val = hw->phy.ops.read_reg_locked(hw,
1641 							      PHY_REG(776, 20),
1642 							      &data);
1643 				if (ret_val) {
1644 					hw->phy.ops.release(hw);
1645 					return ret_val;
1646 				}
1647 
1648 				ptr_gap = (data & (0x3FF << 2)) >> 2;
1649 				if (ptr_gap < 0x18) {
1650 					data &= ~(0x3FF << 2);
1651 					data |= (0x18 << 2);
1652 					ret_val =
1653 						hw->phy.ops.write_reg_locked(hw,
1654 							PHY_REG(776, 20), data);
1655 				}
1656 				hw->phy.ops.release(hw);
1657 				if (ret_val)
1658 					return ret_val;
1659 			} else {
1660 				ret_val = hw->phy.ops.acquire(hw);
1661 				if (ret_val)
1662 					return ret_val;
1663 
1664 				ret_val = hw->phy.ops.write_reg_locked(hw,
1665 							     PHY_REG(776, 20),
1666 							     0xC023);
1667 				hw->phy.ops.release(hw);
1668 				if (ret_val)
1669 					return ret_val;
1670 
1671 			}
1672 		}
1673 	}
1674 
1675 	/* I217 Packet Loss issue:
1676 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
1677 	 * on power up.
1678 	 * Set the Beacon Duration for I217 to 8 usec
1679 	 */
1680 	if ((hw->mac.type == e1000_pch_lpt) ||
1681 	    (hw->mac.type == e1000_pch_spt)) {
1682 		u32 mac_reg;
1683 
1684 		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1685 		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1686 		mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1687 		E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1688 	}
1689 
1690 	/* Work-around I218 hang issue */
1691 	if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1692 	    (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1693 	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1694 	    (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1695 		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1696 		if (ret_val)
1697 			return ret_val;
1698 	}
1699 	if ((hw->mac.type == e1000_pch_lpt) ||
1700 	    (hw->mac.type == e1000_pch_spt)) {
1701 		/* Set platform power management values for
1702 		 * Latency Tolerance Reporting (LTR)
1703 		 * Optimized Buffer Flush/Fill (OBFF)
1704 		 */
1705 		ret_val = e1000_platform_pm_pch_lpt(hw, link);
1706 		if (ret_val)
1707 			return ret_val;
1708 	}
1709 
1710 	/* Clear link partner's EEE ability */
1711 	hw->dev_spec.ich8lan.eee_lp_ability = 0;
1712 
1713 	/* FEXTNVM6 K1-off workaround */
1714 	if (hw->mac.type == e1000_pch_spt) {
1715 		u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);
1716 		u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1717 
1718 		if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
1719 			fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
1720 		else
1721 			fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1722 
1723 		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1724 	}
1725 
1726 	if (!link)
1727 		return E1000_SUCCESS; /* No link detected */
1728 
1729 	mac->get_link_status = FALSE;
1730 
1731 	switch (hw->mac.type) {
1732 	case e1000_pch2lan:
1733 		ret_val = e1000_k1_workaround_lv(hw);
1734 		if (ret_val)
1735 			return ret_val;
1736 		/* fall-thru */
1737 	case e1000_pchlan:
1738 		if (hw->phy.type == e1000_phy_82578) {
1739 			ret_val = e1000_link_stall_workaround_hv(hw);
1740 			if (ret_val)
1741 				return ret_val;
1742 		}
1743 
1744 		/* Workaround for PCHx parts in half-duplex:
1745 		 * Set the number of preambles removed from the packet
1746 		 * when it is passed from the PHY to the MAC to prevent
1747 		 * the MAC from misinterpreting the packet type.
1748 		 */
1749 		hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1750 		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1751 
1752 		if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1753 		    E1000_STATUS_FD)
1754 			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1755 
1756 		hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1757 		break;
1758 	default:
1759 		break;
1760 	}
1761 
1762 	/* Check if there was DownShift, must be checked
1763 	 * immediately after link-up
1764 	 */
1765 	e1000_check_downshift_generic(hw);
1766 
1767 	/* Enable/Disable EEE after link up */
1768 	if (hw->phy.type > e1000_phy_82579) {
1769 		ret_val = e1000_set_eee_pchlan(hw);
1770 		if (ret_val)
1771 			return ret_val;
1772 	}
1773 
1774 	/* If we are forcing speed/duplex, then we simply return since
1775 	 * we have already determined whether we have link or not.
1776 	 */
1777 	if (!mac->autoneg)
1778 		return -E1000_ERR_CONFIG;
1779 
1780 	/* Auto-Neg is enabled.  Auto Speed Detection takes care
1781 	 * of MAC speed/duplex configuration.  So we only need to
1782 	 * configure Collision Distance in the MAC.
1783 	 */
1784 	mac->ops.config_collision_dist(hw);
1785 
1786 	/* Configure Flow Control now that Auto-Neg has completed.
1787 	 * First, we need to restore the desired flow control
1788 	 * settings because we may have had to re-autoneg with a
1789 	 * different link partner.
1790 	 */
1791 	ret_val = e1000_config_fc_after_link_up_generic(hw);
1792 	if (ret_val)
1793 		DEBUGOUT("Error configuring flow control\n");
1794 
1795 	return ret_val;
1796 }
1797 
1798 /**
1799  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1800  *  @hw: pointer to the HW structure
1801  *
1802  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1803  **/
1804 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1805 {
1806 	DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1807 
1808 	hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1809 	hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1810 	switch (hw->mac.type) {
1811 	case e1000_ich8lan:
1812 	case e1000_ich9lan:
1813 	case e1000_ich10lan:
1814 		hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1815 		break;
1816 	case e1000_pchlan:
1817 	case e1000_pch2lan:
1818 	case e1000_pch_lpt:
1819 	case e1000_pch_spt:
1820 		hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1821 		break;
1822 	default:
1823 		break;
1824 	}
1825 }
1826 
1827 /**
1828  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1829  *  @hw: pointer to the HW structure
1830  *
1831  *  Acquires the mutex for performing NVM operations.
1832  **/
1833 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1834 {
1835 	DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1836 	return E1000_SUCCESS;
1837 }
1838 
1839 /**
1840  *  e1000_release_nvm_ich8lan - Release NVM mutex
1841  *  @hw: pointer to the HW structure
1842  *
1843  *  Releases the mutex used while performing NVM operations.
1844  **/
1845 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1846 {
1847 	DEBUGFUNC("e1000_release_nvm_ich8lan");
1848 	return;
1849 }
1850 
1851 /**
1852  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1853  *  @hw: pointer to the HW structure
1854  *
1855  *  Acquires the software control flag for performing PHY and select
1856  *  MAC CSR accesses.
1857  **/
1858 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1859 {
1860 	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1861 	s32 ret_val = E1000_SUCCESS;
1862 
1863 	DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1864 
1865 	while (timeout) {
1866 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1867 		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1868 			break;
1869 
1870 		msec_delay_irq(1);
1871 		timeout--;
1872 	}
1873 
1874 	if (!timeout) {
1875 		DEBUGOUT("SW has already locked the resource.\n");
1876 		ret_val = -E1000_ERR_CONFIG;
1877 		goto out;
1878 	}
1879 
1880 	timeout = SW_FLAG_TIMEOUT;
1881 
1882 	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1883 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1884 
1885 	while (timeout) {
1886 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1887 		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1888 			break;
1889 
1890 		msec_delay_irq(1);
1891 		timeout--;
1892 	}
1893 
1894 	if (!timeout) {
1895 		DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
1896 			  E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1897 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1898 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1899 		ret_val = -E1000_ERR_CONFIG;
1900 		goto out;
1901 	}
1902 
1903 out:
1904 	return ret_val;
1905 }
1906 
1907 /**
1908  *  e1000_release_swflag_ich8lan - Release software control flag
1909  *  @hw: pointer to the HW structure
1910  *
1911  *  Releases the software control flag for performing PHY and select
1912  *  MAC CSR accesses.
1913  **/
1914 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1915 {
1916 	u32 extcnf_ctrl;
1917 
1918 	DEBUGFUNC("e1000_release_swflag_ich8lan");
1919 
1920 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1921 
1922 	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1923 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1924 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1925 	} else {
1926 		DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1927 	}
1928 	return;
1929 }
1930 
1931 /**
1932  *  e1000_check_mng_mode_ich8lan - Checks management mode
1933  *  @hw: pointer to the HW structure
1934  *
1935  *  This checks if the adapter has any manageability enabled.
1936  *  This is a function pointer entry point only called by read/write
1937  *  routines for the PHY and NVM parts.
1938  **/
1939 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1940 {
1941 	u32 fwsm;
1942 
1943 	DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1944 
1945 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
1946 
1947 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1948 	       ((fwsm & E1000_FWSM_MODE_MASK) ==
1949 		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1950 }
1951 
1952 /**
1953  *  e1000_check_mng_mode_pchlan - Checks management mode
1954  *  @hw: pointer to the HW structure
1955  *
1956  *  This checks if the adapter has iAMT enabled.
1957  *  This is a function pointer entry point only called by read/write
1958  *  routines for the PHY and NVM parts.
1959  **/
1960 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1961 {
1962 	u32 fwsm;
1963 
1964 	DEBUGFUNC("e1000_check_mng_mode_pchlan");
1965 
1966 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
1967 
1968 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1969 	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1970 }
1971 
1972 /**
1973  *  e1000_rar_set_pch2lan - Set receive address register
1974  *  @hw: pointer to the HW structure
1975  *  @addr: pointer to the receive address
1976  *  @index: receive address array register
1977  *
1978  *  Sets the receive address array register at index to the address passed
1979  *  in by addr.  For 82579, RAR[0] is the base address register that is to
1980  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1981  *  Use SHRA[0-3] in place of those reserved for ME.
1982  **/
1983 static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1984 {
1985 	u32 rar_low, rar_high;
1986 
1987 	DEBUGFUNC("e1000_rar_set_pch2lan");
1988 
1989 	/* HW expects these in little endian so we reverse the byte order
1990 	 * from network order (big endian) to little endian
1991 	 */
1992 	rar_low = ((u32) addr[0] |
1993 		   ((u32) addr[1] << 8) |
1994 		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1995 
1996 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1997 
1998 	/* If MAC address zero, no need to set the AV bit */
1999 	if (rar_low || rar_high)
2000 		rar_high |= E1000_RAH_AV;
2001 
2002 	if (index == 0) {
2003 		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2004 		E1000_WRITE_FLUSH(hw);
2005 		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2006 		E1000_WRITE_FLUSH(hw);
2007 		return E1000_SUCCESS;
2008 	}
2009 
2010 	/* RAR[1-6] are owned by manageability.  Skip those and program the
2011 	 * next address into the SHRA register array.
2012 	 */
2013 	if (index < (u32) (hw->mac.rar_entry_count)) {
2014 		s32 ret_val;
2015 
2016 		ret_val = e1000_acquire_swflag_ich8lan(hw);
2017 		if (ret_val)
2018 			goto out;
2019 
2020 		E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
2021 		E1000_WRITE_FLUSH(hw);
2022 		E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
2023 		E1000_WRITE_FLUSH(hw);
2024 
2025 		e1000_release_swflag_ich8lan(hw);
2026 
2027 		/* verify the register updates */
2028 		if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
2029 		    (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
2030 			return E1000_SUCCESS;
2031 
2032 		DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
2033 			 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
2034 	}
2035 
2036 out:
2037 	DEBUGOUT1("Failed to write receive address at index %d\n", index);
2038 	return -E1000_ERR_CONFIG;
2039 }
2040 
2041 /**
2042  *  e1000_rar_set_pch_lpt - Set receive address registers
2043  *  @hw: pointer to the HW structure
2044  *  @addr: pointer to the receive address
2045  *  @index: receive address array register
2046  *
2047  *  Sets the receive address register array at index to the address passed
2048  *  in by addr. For LPT, RAR[0] is the base address register that is to
2049  *  contain the MAC address. SHRA[0-10] are the shared receive address
2050  *  registers that are shared between the Host and manageability engine (ME).
2051  **/
2052 static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
2053 {
2054 	u32 rar_low, rar_high;
2055 	u32 wlock_mac;
2056 
2057 	DEBUGFUNC("e1000_rar_set_pch_lpt");
2058 
2059 	/* HW expects these in little endian so we reverse the byte order
2060 	 * from network order (big endian) to little endian
2061 	 */
2062 	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
2063 		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2064 
2065 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2066 
2067 	/* If MAC address zero, no need to set the AV bit */
2068 	if (rar_low || rar_high)
2069 		rar_high |= E1000_RAH_AV;
2070 
2071 	if (index == 0) {
2072 		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2073 		E1000_WRITE_FLUSH(hw);
2074 		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2075 		E1000_WRITE_FLUSH(hw);
2076 		return E1000_SUCCESS;
2077 	}
2078 
2079 	/* The manageability engine (ME) can lock certain SHRAR registers that
2080 	 * it is using - those registers are unavailable for use.
2081 	 */
2082 	if (index < hw->mac.rar_entry_count) {
2083 		wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
2084 			    E1000_FWSM_WLOCK_MAC_MASK;
2085 		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
2086 
2087 		/* Check if all SHRAR registers are locked */
2088 		if (wlock_mac == 1)
2089 			goto out;
2090 
2091 		if ((wlock_mac == 0) || (index <= wlock_mac)) {
2092 			s32 ret_val;
2093 
2094 			ret_val = e1000_acquire_swflag_ich8lan(hw);
2095 
2096 			if (ret_val)
2097 				goto out;
2098 
2099 			E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
2100 					rar_low);
2101 			E1000_WRITE_FLUSH(hw);
2102 			E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
2103 					rar_high);
2104 			E1000_WRITE_FLUSH(hw);
2105 
2106 			e1000_release_swflag_ich8lan(hw);
2107 
2108 			/* verify the register updates */
2109 			if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
2110 			    (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
2111 				return E1000_SUCCESS;
2112 		}
2113 	}
2114 
2115 out:
2116 	DEBUGOUT1("Failed to write receive address at index %d\n", index);
2117 	return -E1000_ERR_CONFIG;
2118 }
2119 
2120 /**
2121  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
2122  *  @hw: pointer to the HW structure
2123  *  @mc_addr_list: array of multicast addresses to program
2124  *  @mc_addr_count: number of multicast addresses to program
2125  *
2126  *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
2127  *  The caller must have a packed mc_addr_list of multicast addresses.
2128  **/
2129 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
2130 					      u8 *mc_addr_list,
2131 					      u32 mc_addr_count)
2132 {
2133 	u16 phy_reg = 0;
2134 	int i;
2135 	s32 ret_val;
2136 
2137 	DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
2138 
2139 	e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
2140 
2141 	ret_val = hw->phy.ops.acquire(hw);
2142 	if (ret_val)
2143 		return;
2144 
2145 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2146 	if (ret_val)
2147 		goto release;
2148 
2149 	for (i = 0; i < hw->mac.mta_reg_count; i++) {
2150 		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
2151 					   (u16)(hw->mac.mta_shadow[i] &
2152 						 0xFFFF));
2153 		hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
2154 					   (u16)((hw->mac.mta_shadow[i] >> 16) &
2155 						 0xFFFF));
2156 	}
2157 
2158 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2159 
2160 release:
2161 	hw->phy.ops.release(hw);
2162 }
2163 
2164 /**
2165  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
2166  *  @hw: pointer to the HW structure
2167  *
2168  *  Checks if firmware is blocking the reset of the PHY.
2169  *  This is a function pointer entry point only called by
2170  *  reset routines.
2171  **/
2172 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2173 {
2174 	u32 fwsm;
2175 	bool blocked = FALSE;
2176 	int i = 0;
2177 
2178 	DEBUGFUNC("e1000_check_reset_block_ich8lan");
2179 
2180 	do {
2181 		fwsm = E1000_READ_REG(hw, E1000_FWSM);
2182 		if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
2183 			blocked = TRUE;
2184 			msec_delay(10);
2185 			continue;
2186 		}
2187 		blocked = FALSE;
2188 	} while (blocked && (i++ < 30));
2189 	return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
2190 }
2191 
2192 /**
2193  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2194  *  @hw: pointer to the HW structure
2195  *
2196  *  Assumes semaphore already acquired.
2197  *
2198  **/
2199 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2200 {
2201 	u16 phy_data;
2202 	u32 strap = E1000_READ_REG(hw, E1000_STRAP);
2203 	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2204 		E1000_STRAP_SMT_FREQ_SHIFT;
2205 	s32 ret_val;
2206 
2207 	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2208 
2209 	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2210 	if (ret_val)
2211 		return ret_val;
2212 
2213 	phy_data &= ~HV_SMB_ADDR_MASK;
2214 	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2215 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2216 
2217 	if (hw->phy.type == e1000_phy_i217) {
2218 		/* Restore SMBus frequency */
2219 		if (freq--) {
2220 			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2221 			phy_data |= (freq & (1 << 0)) <<
2222 				HV_SMB_ADDR_FREQ_LOW_SHIFT;
2223 			phy_data |= (freq & (1 << 1)) <<
2224 				(HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2225 		} else {
2226 			DEBUGOUT("Unsupported SMB frequency in PHY\n");
2227 		}
2228 	}
2229 
2230 	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2231 }
2232 
2233 /**
2234  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2235  *  @hw:   pointer to the HW structure
2236  *
2237  *  SW should configure the LCD from the NVM extended configuration region
2238  *  as a workaround for certain parts.
2239  **/
2240 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2241 {
2242 	struct e1000_phy_info *phy = &hw->phy;
2243 	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2244 	s32 ret_val = E1000_SUCCESS;
2245 	u16 word_addr, reg_data, reg_addr, phy_page = 0;
2246 
2247 	DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2248 
2249 	/* Initialize the PHY from the NVM on ICH platforms.  This
2250 	 * is needed due to an issue where the NVM configuration is
2251 	 * not properly autoloaded after power transitions.
2252 	 * Therefore, after each PHY reset, we will load the
2253 	 * configuration data out of the NVM manually.
2254 	 */
2255 	switch (hw->mac.type) {
2256 	case e1000_ich8lan:
2257 		if (phy->type != e1000_phy_igp_3)
2258 			return ret_val;
2259 
2260 		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2261 		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2262 			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2263 			break;
2264 		}
2265 		/* Fall-thru */
2266 	case e1000_pchlan:
2267 	case e1000_pch2lan:
2268 	case e1000_pch_lpt:
2269 	case e1000_pch_spt:
2270 		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2271 		break;
2272 	default:
2273 		return ret_val;
2274 	}
2275 
2276 	ret_val = hw->phy.ops.acquire(hw);
2277 	if (ret_val)
2278 		return ret_val;
2279 
2280 	data = E1000_READ_REG(hw, E1000_FEXTNVM);
2281 	if (!(data & sw_cfg_mask))
2282 		goto release;
2283 
2284 	/* Make sure HW does not configure LCD from PHY
2285 	 * extended configuration before SW configuration
2286 	 */
2287 	data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2288 	if ((hw->mac.type < e1000_pch2lan) &&
2289 	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2290 			goto release;
2291 
2292 	cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2293 	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2294 	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2295 	if (!cnf_size)
2296 		goto release;
2297 
2298 	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2299 	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2300 
2301 	if (((hw->mac.type == e1000_pchlan) &&
2302 	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2303 	    (hw->mac.type > e1000_pchlan)) {
2304 		/* HW configures the SMBus address and LEDs when the
2305 		 * OEM and LCD Write Enable bits are set in the NVM.
2306 		 * When both NVM bits are cleared, SW will configure
2307 		 * them instead.
2308 		 */
2309 		ret_val = e1000_write_smbus_addr(hw);
2310 		if (ret_val)
2311 			goto release;
2312 
2313 		data = E1000_READ_REG(hw, E1000_LEDCTL);
2314 		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2315 							(u16)data);
2316 		if (ret_val)
2317 			goto release;
2318 	}
2319 
2320 	/* Configure LCD from extended configuration region. */
2321 
2322 	/* cnf_base_addr is in DWORD */
2323 	word_addr = (u16)(cnf_base_addr << 1);
2324 
2325 	for (i = 0; i < cnf_size; i++) {
2326 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2327 					   &reg_data);
2328 		if (ret_val)
2329 			goto release;
2330 
2331 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2332 					   1, &reg_addr);
2333 		if (ret_val)
2334 			goto release;
2335 
2336 		/* Save off the PHY page for future writes. */
2337 		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2338 			phy_page = reg_data;
2339 			continue;
2340 		}
2341 
2342 		reg_addr &= PHY_REG_MASK;
2343 		reg_addr |= phy_page;
2344 
2345 		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2346 						    reg_data);
2347 		if (ret_val)
2348 			goto release;
2349 	}
2350 
2351 release:
2352 	hw->phy.ops.release(hw);
2353 	return ret_val;
2354 }
2355 
2356 /**
2357  *  e1000_k1_gig_workaround_hv - K1 Si workaround
2358  *  @hw:   pointer to the HW structure
2359  *  @link: link up bool flag
2360  *
2361  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2362  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
2363  *  If link is down, the function will restore the default K1 setting located
2364  *  in the NVM.
2365  **/
2366 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2367 {
2368 	s32 ret_val = E1000_SUCCESS;
2369 	u16 status_reg = 0;
2370 	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2371 
2372 	DEBUGFUNC("e1000_k1_gig_workaround_hv");
2373 
2374 	if (hw->mac.type != e1000_pchlan)
2375 		return E1000_SUCCESS;
2376 
2377 	/* Wrap the whole flow with the sw flag */
2378 	ret_val = hw->phy.ops.acquire(hw);
2379 	if (ret_val)
2380 		return ret_val;
2381 
2382 	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2383 	if (link) {
2384 		if (hw->phy.type == e1000_phy_82578) {
2385 			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2386 							      &status_reg);
2387 			if (ret_val)
2388 				goto release;
2389 
2390 			status_reg &= (BM_CS_STATUS_LINK_UP |
2391 				       BM_CS_STATUS_RESOLVED |
2392 				       BM_CS_STATUS_SPEED_MASK);
2393 
2394 			if (status_reg == (BM_CS_STATUS_LINK_UP |
2395 					   BM_CS_STATUS_RESOLVED |
2396 					   BM_CS_STATUS_SPEED_1000))
2397 				k1_enable = FALSE;
2398 		}
2399 
2400 		if (hw->phy.type == e1000_phy_82577) {
2401 			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2402 							      &status_reg);
2403 			if (ret_val)
2404 				goto release;
2405 
2406 			status_reg &= (HV_M_STATUS_LINK_UP |
2407 				       HV_M_STATUS_AUTONEG_COMPLETE |
2408 				       HV_M_STATUS_SPEED_MASK);
2409 
2410 			if (status_reg == (HV_M_STATUS_LINK_UP |
2411 					   HV_M_STATUS_AUTONEG_COMPLETE |
2412 					   HV_M_STATUS_SPEED_1000))
2413 				k1_enable = FALSE;
2414 		}
2415 
2416 		/* Link stall fix for link up */
2417 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2418 						       0x0100);
2419 		if (ret_val)
2420 			goto release;
2421 
2422 	} else {
2423 		/* Link stall fix for link down */
2424 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2425 						       0x4100);
2426 		if (ret_val)
2427 			goto release;
2428 	}
2429 
2430 	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2431 
2432 release:
2433 	hw->phy.ops.release(hw);
2434 
2435 	return ret_val;
2436 }
2437 
2438 /**
2439  *  e1000_configure_k1_ich8lan - Configure K1 power state
2440  *  @hw: pointer to the HW structure
2441  *  @enable: K1 state to configure
2442  *
2443  *  Configure the K1 power state based on the provided parameter.
2444  *  Assumes semaphore already acquired.
2445  *
2446  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2447  **/
2448 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2449 {
2450 	s32 ret_val;
2451 	u32 ctrl_reg = 0;
2452 	u32 ctrl_ext = 0;
2453 	u32 reg = 0;
2454 	u16 kmrn_reg = 0;
2455 
2456 	DEBUGFUNC("e1000_configure_k1_ich8lan");
2457 
2458 	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2459 					     &kmrn_reg);
2460 	if (ret_val)
2461 		return ret_val;
2462 
2463 	if (k1_enable)
2464 		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2465 	else
2466 		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2467 
2468 	ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2469 					      kmrn_reg);
2470 	if (ret_val)
2471 		return ret_val;
2472 
2473 	usec_delay(20);
2474 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2475 	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2476 
2477 	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2478 	reg |= E1000_CTRL_FRCSPD;
2479 	E1000_WRITE_REG(hw, E1000_CTRL, reg);
2480 
2481 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2482 	E1000_WRITE_FLUSH(hw);
2483 	usec_delay(20);
2484 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2485 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2486 	E1000_WRITE_FLUSH(hw);
2487 	usec_delay(20);
2488 
2489 	return E1000_SUCCESS;
2490 }
2491 
2492 /**
2493  *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2494  *  @hw:       pointer to the HW structure
2495  *  @d0_state: boolean if entering d0 or d3 device state
2496  *
2497  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2498  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2499  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
2500  **/
2501 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2502 {
2503 	s32 ret_val = 0;
2504 	u32 mac_reg;
2505 	u16 oem_reg;
2506 
2507 	DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2508 
2509 	if (hw->mac.type < e1000_pchlan)
2510 		return ret_val;
2511 
2512 	ret_val = hw->phy.ops.acquire(hw);
2513 	if (ret_val)
2514 		return ret_val;
2515 
2516 	if (hw->mac.type == e1000_pchlan) {
2517 		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2518 		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2519 			goto release;
2520 	}
2521 
2522 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2523 	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2524 		goto release;
2525 
2526 	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2527 
2528 	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2529 	if (ret_val)
2530 		goto release;
2531 
2532 	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2533 
2534 	if (d0_state) {
2535 		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2536 			oem_reg |= HV_OEM_BITS_GBE_DIS;
2537 
2538 		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2539 			oem_reg |= HV_OEM_BITS_LPLU;
2540 	} else {
2541 		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2542 		    E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2543 			oem_reg |= HV_OEM_BITS_GBE_DIS;
2544 
2545 		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2546 		    E1000_PHY_CTRL_NOND0A_LPLU))
2547 			oem_reg |= HV_OEM_BITS_LPLU;
2548 	}
2549 
2550 	/* Set Restart auto-neg to activate the bits */
2551 	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2552 	    !hw->phy.ops.check_reset_block(hw))
2553 		oem_reg |= HV_OEM_BITS_RESTART_AN;
2554 
2555 	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2556 
2557 release:
2558 	hw->phy.ops.release(hw);
2559 
2560 	return ret_val;
2561 }
2562 
2563 
2564 /**
2565  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2566  *  @hw:   pointer to the HW structure
2567  **/
2568 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2569 {
2570 	s32 ret_val;
2571 	u16 data;
2572 
2573 	DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2574 
2575 	ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2576 	if (ret_val)
2577 		return ret_val;
2578 
2579 	data |= HV_KMRN_MDIO_SLOW;
2580 
2581 	ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2582 
2583 	return ret_val;
2584 }
2585 
2586 /**
2587  *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2588  *  done after every PHY reset.
2589  **/
2590 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2591 {
2592 	s32 ret_val = E1000_SUCCESS;
2593 	u16 phy_data;
2594 
2595 	DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2596 
2597 	if (hw->mac.type != e1000_pchlan)
2598 		return E1000_SUCCESS;
2599 
2600 	/* Set MDIO slow mode before any other MDIO access */
2601 	if (hw->phy.type == e1000_phy_82577) {
2602 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
2603 		if (ret_val)
2604 			return ret_val;
2605 	}
2606 
2607 	if (((hw->phy.type == e1000_phy_82577) &&
2608 	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2609 	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2610 		/* Disable generation of early preamble */
2611 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2612 		if (ret_val)
2613 			return ret_val;
2614 
2615 		/* Preamble tuning for SSC */
2616 		ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2617 						0xA204);
2618 		if (ret_val)
2619 			return ret_val;
2620 	}
2621 
2622 	if (hw->phy.type == e1000_phy_82578) {
2623 		/* Return registers to default by doing a soft reset then
2624 		 * writing 0x3140 to the control register.
2625 		 */
2626 		if (hw->phy.revision < 2) {
2627 			e1000_phy_sw_reset_generic(hw);
2628 			ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2629 							0x3140);
2630 		}
2631 	}
2632 
2633 	/* Select page 0 */
2634 	ret_val = hw->phy.ops.acquire(hw);
2635 	if (ret_val)
2636 		return ret_val;
2637 
2638 	hw->phy.addr = 1;
2639 	ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2640 	hw->phy.ops.release(hw);
2641 	if (ret_val)
2642 		return ret_val;
2643 
2644 	/* Configure the K1 Si workaround during phy reset assuming there is
2645 	 * link so that it disables K1 if link is in 1Gbps.
2646 	 */
2647 	ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
2648 	if (ret_val)
2649 		return ret_val;
2650 
2651 	/* Workaround for link disconnects on a busy hub in half duplex */
2652 	ret_val = hw->phy.ops.acquire(hw);
2653 	if (ret_val)
2654 		return ret_val;
2655 	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2656 	if (ret_val)
2657 		goto release;
2658 	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2659 					       phy_data & 0x00FF);
2660 	if (ret_val)
2661 		goto release;
2662 
2663 	/* set MSE higher to enable link to stay up when noise is high */
2664 	ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2665 release:
2666 	hw->phy.ops.release(hw);
2667 
2668 	return ret_val;
2669 }
2670 
2671 /**
2672  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2673  *  @hw:   pointer to the HW structure
2674  **/
2675 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2676 {
2677 	u32 mac_reg;
2678 	u16 i, phy_reg = 0;
2679 	s32 ret_val;
2680 
2681 	DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2682 
2683 	ret_val = hw->phy.ops.acquire(hw);
2684 	if (ret_val)
2685 		return;
2686 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2687 	if (ret_val)
2688 		goto release;
2689 
2690 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2691 	for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2692 		mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2693 		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2694 					   (u16)(mac_reg & 0xFFFF));
2695 		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2696 					   (u16)((mac_reg >> 16) & 0xFFFF));
2697 
2698 		mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2699 		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2700 					   (u16)(mac_reg & 0xFFFF));
2701 		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2702 					   (u16)((mac_reg & E1000_RAH_AV)
2703 						 >> 16));
2704 	}
2705 
2706 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2707 
2708 release:
2709 	hw->phy.ops.release(hw);
2710 }
2711 
2712 static u32 e1000_calc_rx_da_crc(u8 mac[])
2713 {
2714 	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
2715 	u32 i, j, mask, crc;
2716 
2717 	DEBUGFUNC("e1000_calc_rx_da_crc");
2718 
2719 	crc = 0xffffffff;
2720 	for (i = 0; i < 6; i++) {
2721 		crc = crc ^ mac[i];
2722 		for (j = 8; j > 0; j--) {
2723 			mask = (crc & 1) * (-1);
2724 			crc = (crc >> 1) ^ (poly & mask);
2725 		}
2726 	}
2727 	return ~crc;
2728 }
2729 
2730 /**
2731  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2732  *  with 82579 PHY
2733  *  @hw: pointer to the HW structure
2734  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2735  **/
2736 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2737 {
2738 	s32 ret_val = E1000_SUCCESS;
2739 	u16 phy_reg, data;
2740 	u32 mac_reg;
2741 	u16 i;
2742 
2743 	DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2744 
2745 	if (hw->mac.type < e1000_pch2lan)
2746 		return E1000_SUCCESS;
2747 
2748 	/* disable Rx path while enabling/disabling workaround */
2749 	hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2750 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2751 					phy_reg | (1 << 14));
2752 	if (ret_val)
2753 		return ret_val;
2754 
2755 	if (enable) {
2756 		/* Write Rx addresses (rar_entry_count for RAL/H, and
2757 		 * SHRAL/H) and initial CRC values to the MAC
2758 		 */
2759 		for (i = 0; i < hw->mac.rar_entry_count; i++) {
2760 			u8 mac_addr[ETH_ADDR_LEN] = {0};
2761 			u32 addr_high, addr_low;
2762 
2763 			addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2764 			if (!(addr_high & E1000_RAH_AV))
2765 				continue;
2766 			addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2767 			mac_addr[0] = (addr_low & 0xFF);
2768 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
2769 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
2770 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
2771 			mac_addr[4] = (addr_high & 0xFF);
2772 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
2773 
2774 			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2775 					e1000_calc_rx_da_crc(mac_addr));
2776 		}
2777 
2778 		/* Write Rx addresses to the PHY */
2779 		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2780 
2781 		/* Enable jumbo frame workaround in the MAC */
2782 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2783 		mac_reg &= ~(1 << 14);
2784 		mac_reg |= (7 << 15);
2785 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2786 
2787 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2788 		mac_reg |= E1000_RCTL_SECRC;
2789 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2790 
2791 		ret_val = e1000_read_kmrn_reg_generic(hw,
2792 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2793 						&data);
2794 		if (ret_val)
2795 			return ret_val;
2796 		ret_val = e1000_write_kmrn_reg_generic(hw,
2797 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2798 						data | (1 << 0));
2799 		if (ret_val)
2800 			return ret_val;
2801 		ret_val = e1000_read_kmrn_reg_generic(hw,
2802 						E1000_KMRNCTRLSTA_HD_CTRL,
2803 						&data);
2804 		if (ret_val)
2805 			return ret_val;
2806 		data &= ~(0xF << 8);
2807 		data |= (0xB << 8);
2808 		ret_val = e1000_write_kmrn_reg_generic(hw,
2809 						E1000_KMRNCTRLSTA_HD_CTRL,
2810 						data);
2811 		if (ret_val)
2812 			return ret_val;
2813 
2814 		/* Enable jumbo frame workaround in the PHY */
2815 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2816 		data &= ~(0x7F << 5);
2817 		data |= (0x37 << 5);
2818 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2819 		if (ret_val)
2820 			return ret_val;
2821 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2822 		data &= ~(1 << 13);
2823 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2824 		if (ret_val)
2825 			return ret_val;
2826 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2827 		data &= ~(0x3FF << 2);
2828 		data |= (E1000_TX_PTR_GAP << 2);
2829 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2830 		if (ret_val)
2831 			return ret_val;
2832 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2833 		if (ret_val)
2834 			return ret_val;
2835 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2836 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2837 						(1 << 10));
2838 		if (ret_val)
2839 			return ret_val;
2840 	} else {
2841 		/* Write MAC register values back to h/w defaults */
2842 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2843 		mac_reg &= ~(0xF << 14);
2844 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2845 
2846 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2847 		mac_reg &= ~E1000_RCTL_SECRC;
2848 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2849 
2850 		ret_val = e1000_read_kmrn_reg_generic(hw,
2851 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2852 						&data);
2853 		if (ret_val)
2854 			return ret_val;
2855 		ret_val = e1000_write_kmrn_reg_generic(hw,
2856 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2857 						data & ~(1 << 0));
2858 		if (ret_val)
2859 			return ret_val;
2860 		ret_val = e1000_read_kmrn_reg_generic(hw,
2861 						E1000_KMRNCTRLSTA_HD_CTRL,
2862 						&data);
2863 		if (ret_val)
2864 			return ret_val;
2865 		data &= ~(0xF << 8);
2866 		data |= (0xB << 8);
2867 		ret_val = e1000_write_kmrn_reg_generic(hw,
2868 						E1000_KMRNCTRLSTA_HD_CTRL,
2869 						data);
2870 		if (ret_val)
2871 			return ret_val;
2872 
2873 		/* Write PHY register values back to h/w defaults */
2874 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2875 		data &= ~(0x7F << 5);
2876 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2877 		if (ret_val)
2878 			return ret_val;
2879 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2880 		data |= (1 << 13);
2881 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2882 		if (ret_val)
2883 			return ret_val;
2884 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2885 		data &= ~(0x3FF << 2);
2886 		data |= (0x8 << 2);
2887 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2888 		if (ret_val)
2889 			return ret_val;
2890 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2891 		if (ret_val)
2892 			return ret_val;
2893 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2894 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2895 						~(1 << 10));
2896 		if (ret_val)
2897 			return ret_val;
2898 	}
2899 
2900 	/* re-enable Rx path after enabling/disabling workaround */
2901 	return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2902 				     ~(1 << 14));
2903 }
2904 
2905 /**
2906  *  e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2907  *  done after every PHY reset.
2908  **/
2909 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2910 {
2911 	s32 ret_val = E1000_SUCCESS;
2912 
2913 	DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2914 
2915 	if (hw->mac.type != e1000_pch2lan)
2916 		return E1000_SUCCESS;
2917 
2918 	/* Set MDIO slow mode before any other MDIO access */
2919 	ret_val = e1000_set_mdio_slow_mode_hv(hw);
2920 	if (ret_val)
2921 		return ret_val;
2922 
2923 	ret_val = hw->phy.ops.acquire(hw);
2924 	if (ret_val)
2925 		return ret_val;
2926 	/* set MSE higher to enable link to stay up when noise is high */
2927 	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2928 	if (ret_val)
2929 		goto release;
2930 	/* drop link after 5 times MSE threshold was reached */
2931 	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2932 release:
2933 	hw->phy.ops.release(hw);
2934 
2935 	return ret_val;
2936 }
2937 
2938 /**
2939  *  e1000_k1_gig_workaround_lv - K1 Si workaround
2940  *  @hw:   pointer to the HW structure
2941  *
2942  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
2943  *  Disable K1 for 1000 and 100 speeds
2944  **/
2945 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2946 {
2947 	s32 ret_val = E1000_SUCCESS;
2948 	u16 status_reg = 0;
2949 
2950 	DEBUGFUNC("e1000_k1_workaround_lv");
2951 
2952 	if (hw->mac.type != e1000_pch2lan)
2953 		return E1000_SUCCESS;
2954 
2955 	/* Set K1 beacon duration based on 10Mbs speed */
2956 	ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2957 	if (ret_val)
2958 		return ret_val;
2959 
2960 	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2961 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2962 		if (status_reg &
2963 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2964 			u16 pm_phy_reg;
2965 
2966 			/* LV 1G/100 Packet drop issue wa  */
2967 			ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2968 						       &pm_phy_reg);
2969 			if (ret_val)
2970 				return ret_val;
2971 			pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2972 			ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
2973 							pm_phy_reg);
2974 			if (ret_val)
2975 				return ret_val;
2976 		} else {
2977 			u32 mac_reg;
2978 			mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
2979 			mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2980 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2981 			E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2982 		}
2983 	}
2984 
2985 	return ret_val;
2986 }
2987 
2988 /**
2989  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2990  *  @hw:   pointer to the HW structure
2991  *  @gate: boolean set to TRUE to gate, FALSE to ungate
2992  *
2993  *  Gate/ungate the automatic PHY configuration via hardware; perform
2994  *  the configuration via software instead.
2995  **/
2996 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2997 {
2998 	u32 extcnf_ctrl;
2999 
3000 	DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
3001 
3002 	if (hw->mac.type < e1000_pch2lan)
3003 		return;
3004 
3005 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
3006 
3007 	if (gate)
3008 		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3009 	else
3010 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3011 
3012 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
3013 }
3014 
3015 /**
3016  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
3017  *  @hw: pointer to the HW structure
3018  *
3019  *  Check the appropriate indication the MAC has finished configuring the
3020  *  PHY after a software reset.
3021  **/
3022 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
3023 {
3024 	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
3025 
3026 	DEBUGFUNC("e1000_lan_init_done_ich8lan");
3027 
3028 	/* Wait for basic configuration completes before proceeding */
3029 	do {
3030 		data = E1000_READ_REG(hw, E1000_STATUS);
3031 		data &= E1000_STATUS_LAN_INIT_DONE;
3032 		usec_delay(100);
3033 	} while ((!data) && --loop);
3034 
3035 	/* If basic configuration is incomplete before the above loop
3036 	 * count reaches 0, loading the configuration from NVM will
3037 	 * leave the PHY in a bad state possibly resulting in no link.
3038 	 */
3039 	if (loop == 0)
3040 		DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
3041 
3042 	/* Clear the Init Done bit for the next init event */
3043 	data = E1000_READ_REG(hw, E1000_STATUS);
3044 	data &= ~E1000_STATUS_LAN_INIT_DONE;
3045 	E1000_WRITE_REG(hw, E1000_STATUS, data);
3046 }
3047 
3048 /**
3049  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
3050  *  @hw: pointer to the HW structure
3051  **/
3052 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
3053 {
3054 	s32 ret_val = E1000_SUCCESS;
3055 	u16 reg;
3056 
3057 	DEBUGFUNC("e1000_post_phy_reset_ich8lan");
3058 
3059 	if (hw->phy.ops.check_reset_block(hw))
3060 		return E1000_SUCCESS;
3061 
3062 	/* Allow time for h/w to get to quiescent state after reset */
3063 	msec_delay(10);
3064 
3065 	/* Perform any necessary post-reset workarounds */
3066 	switch (hw->mac.type) {
3067 	case e1000_pchlan:
3068 		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
3069 		if (ret_val)
3070 			return ret_val;
3071 		break;
3072 	case e1000_pch2lan:
3073 		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
3074 		if (ret_val)
3075 			return ret_val;
3076 		break;
3077 	default:
3078 		break;
3079 	}
3080 
3081 	/* Clear the host wakeup bit after lcd reset */
3082 	if (hw->mac.type >= e1000_pchlan) {
3083 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
3084 		reg &= ~BM_WUC_HOST_WU_BIT;
3085 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
3086 	}
3087 
3088 	/* Configure the LCD with the extended configuration region in NVM */
3089 	ret_val = e1000_sw_lcd_config_ich8lan(hw);
3090 	if (ret_val)
3091 		return ret_val;
3092 
3093 	/* Configure the LCD with the OEM bits in NVM */
3094 	ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
3095 
3096 	if (hw->mac.type == e1000_pch2lan) {
3097 		/* Ungate automatic PHY configuration on non-managed 82579 */
3098 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
3099 		    E1000_ICH_FWSM_FW_VALID)) {
3100 			msec_delay(10);
3101 			e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
3102 		}
3103 
3104 		/* Set EEE LPI Update Timer to 200usec */
3105 		ret_val = hw->phy.ops.acquire(hw);
3106 		if (ret_val)
3107 			return ret_val;
3108 		ret_val = e1000_write_emi_reg_locked(hw,
3109 						     I82579_LPI_UPDATE_TIMER,
3110 						     0x1387);
3111 		hw->phy.ops.release(hw);
3112 	}
3113 
3114 	return ret_val;
3115 }
3116 
3117 /**
3118  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
3119  *  @hw: pointer to the HW structure
3120  *
3121  *  Resets the PHY
3122  *  This is a function pointer entry point called by drivers
3123  *  or other shared routines.
3124  **/
3125 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
3126 {
3127 	s32 ret_val = E1000_SUCCESS;
3128 
3129 	DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
3130 
3131 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
3132 	if ((hw->mac.type == e1000_pch2lan) &&
3133 	    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3134 		e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3135 
3136 	ret_val = e1000_phy_hw_reset_generic(hw);
3137 	if (ret_val)
3138 		return ret_val;
3139 
3140 	return e1000_post_phy_reset_ich8lan(hw);
3141 }
3142 
3143 /**
3144  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
3145  *  @hw: pointer to the HW structure
3146  *  @active: TRUE to enable LPLU, FALSE to disable
3147  *
3148  *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
3149  *  bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
3150  *  the phy speed. This function will manually set the LPLU bit and restart
3151  *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
3152  *  since it configures the same bit.
3153  **/
3154 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
3155 {
3156 	s32 ret_val;
3157 	u16 oem_reg;
3158 
3159 	DEBUGFUNC("e1000_set_lplu_state_pchlan");
3160 	ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
3161 	if (ret_val)
3162 		return ret_val;
3163 
3164 	if (active)
3165 		oem_reg |= HV_OEM_BITS_LPLU;
3166 	else
3167 		oem_reg &= ~HV_OEM_BITS_LPLU;
3168 
3169 	if (!hw->phy.ops.check_reset_block(hw))
3170 		oem_reg |= HV_OEM_BITS_RESTART_AN;
3171 
3172 	return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
3173 }
3174 
3175 /**
3176  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
3177  *  @hw: pointer to the HW structure
3178  *  @active: TRUE to enable LPLU, FALSE to disable
3179  *
3180  *  Sets the LPLU D0 state according to the active flag.  When
3181  *  activating LPLU this function also disables smart speed
3182  *  and vice versa.  LPLU will not be activated unless the
3183  *  device autonegotiation advertisement meets standards of
3184  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3185  *  This is a function pointer entry point only called by
3186  *  PHY setup routines.
3187  **/
3188 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3189 {
3190 	struct e1000_phy_info *phy = &hw->phy;
3191 	u32 phy_ctrl;
3192 	s32 ret_val = E1000_SUCCESS;
3193 	u16 data;
3194 
3195 	DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3196 
3197 	if (phy->type == e1000_phy_ife)
3198 		return E1000_SUCCESS;
3199 
3200 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3201 
3202 	if (active) {
3203 		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3204 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3205 
3206 		if (phy->type != e1000_phy_igp_3)
3207 			return E1000_SUCCESS;
3208 
3209 		/* Call gig speed drop workaround on LPLU before accessing
3210 		 * any PHY registers
3211 		 */
3212 		if (hw->mac.type == e1000_ich8lan)
3213 			e1000_gig_downshift_workaround_ich8lan(hw);
3214 
3215 		/* When LPLU is enabled, we should disable SmartSpeed */
3216 		ret_val = phy->ops.read_reg(hw,
3217 					    IGP01E1000_PHY_PORT_CONFIG,
3218 					    &data);
3219 		if (ret_val)
3220 			return ret_val;
3221 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3222 		ret_val = phy->ops.write_reg(hw,
3223 					     IGP01E1000_PHY_PORT_CONFIG,
3224 					     data);
3225 		if (ret_val)
3226 			return ret_val;
3227 	} else {
3228 		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3229 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3230 
3231 		if (phy->type != e1000_phy_igp_3)
3232 			return E1000_SUCCESS;
3233 
3234 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3235 		 * during Dx states where the power conservation is most
3236 		 * important.  During driver activity we should enable
3237 		 * SmartSpeed, so performance is maintained.
3238 		 */
3239 		if (phy->smart_speed == e1000_smart_speed_on) {
3240 			ret_val = phy->ops.read_reg(hw,
3241 						    IGP01E1000_PHY_PORT_CONFIG,
3242 						    &data);
3243 			if (ret_val)
3244 				return ret_val;
3245 
3246 			data |= IGP01E1000_PSCFR_SMART_SPEED;
3247 			ret_val = phy->ops.write_reg(hw,
3248 						     IGP01E1000_PHY_PORT_CONFIG,
3249 						     data);
3250 			if (ret_val)
3251 				return ret_val;
3252 		} else if (phy->smart_speed == e1000_smart_speed_off) {
3253 			ret_val = phy->ops.read_reg(hw,
3254 						    IGP01E1000_PHY_PORT_CONFIG,
3255 						    &data);
3256 			if (ret_val)
3257 				return ret_val;
3258 
3259 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3260 			ret_val = phy->ops.write_reg(hw,
3261 						     IGP01E1000_PHY_PORT_CONFIG,
3262 						     data);
3263 			if (ret_val)
3264 				return ret_val;
3265 		}
3266 	}
3267 
3268 	return E1000_SUCCESS;
3269 }
3270 
3271 /**
3272  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3273  *  @hw: pointer to the HW structure
3274  *  @active: TRUE to enable LPLU, FALSE to disable
3275  *
3276  *  Sets the LPLU D3 state according to the active flag.  When
3277  *  activating LPLU this function also disables smart speed
3278  *  and vice versa.  LPLU will not be activated unless the
3279  *  device autonegotiation advertisement meets standards of
3280  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3281  *  This is a function pointer entry point only called by
3282  *  PHY setup routines.
3283  **/
3284 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3285 {
3286 	struct e1000_phy_info *phy = &hw->phy;
3287 	u32 phy_ctrl;
3288 	s32 ret_val = E1000_SUCCESS;
3289 	u16 data;
3290 
3291 	DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3292 
3293 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3294 
3295 	if (!active) {
3296 		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3297 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3298 
3299 		if (phy->type != e1000_phy_igp_3)
3300 			return E1000_SUCCESS;
3301 
3302 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3303 		 * during Dx states where the power conservation is most
3304 		 * important.  During driver activity we should enable
3305 		 * SmartSpeed, so performance is maintained.
3306 		 */
3307 		if (phy->smart_speed == e1000_smart_speed_on) {
3308 			ret_val = phy->ops.read_reg(hw,
3309 						    IGP01E1000_PHY_PORT_CONFIG,
3310 						    &data);
3311 			if (ret_val)
3312 				return ret_val;
3313 
3314 			data |= IGP01E1000_PSCFR_SMART_SPEED;
3315 			ret_val = phy->ops.write_reg(hw,
3316 						     IGP01E1000_PHY_PORT_CONFIG,
3317 						     data);
3318 			if (ret_val)
3319 				return ret_val;
3320 		} else if (phy->smart_speed == e1000_smart_speed_off) {
3321 			ret_val = phy->ops.read_reg(hw,
3322 						    IGP01E1000_PHY_PORT_CONFIG,
3323 						    &data);
3324 			if (ret_val)
3325 				return ret_val;
3326 
3327 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3328 			ret_val = phy->ops.write_reg(hw,
3329 						     IGP01E1000_PHY_PORT_CONFIG,
3330 						     data);
3331 			if (ret_val)
3332 				return ret_val;
3333 		}
3334 	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3335 		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3336 		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3337 		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3338 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3339 
3340 		if (phy->type != e1000_phy_igp_3)
3341 			return E1000_SUCCESS;
3342 
3343 		/* Call gig speed drop workaround on LPLU before accessing
3344 		 * any PHY registers
3345 		 */
3346 		if (hw->mac.type == e1000_ich8lan)
3347 			e1000_gig_downshift_workaround_ich8lan(hw);
3348 
3349 		/* When LPLU is enabled, we should disable SmartSpeed */
3350 		ret_val = phy->ops.read_reg(hw,
3351 					    IGP01E1000_PHY_PORT_CONFIG,
3352 					    &data);
3353 		if (ret_val)
3354 			return ret_val;
3355 
3356 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3357 		ret_val = phy->ops.write_reg(hw,
3358 					     IGP01E1000_PHY_PORT_CONFIG,
3359 					     data);
3360 	}
3361 
3362 	return ret_val;
3363 }
3364 
3365 /**
3366  *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3367  *  @hw: pointer to the HW structure
3368  *  @bank:  pointer to the variable that returns the active bank
3369  *
3370  *  Reads signature byte from the NVM using the flash access registers.
3371  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3372  **/
3373 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3374 {
3375 	u32 eecd;
3376 	struct e1000_nvm_info *nvm = &hw->nvm;
3377 	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3378 	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3379 	u32 nvm_dword = 0;
3380 	u8 sig_byte = 0;
3381 	s32 ret_val;
3382 
3383 	DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3384 
3385 	switch (hw->mac.type) {
3386 	case e1000_pch_spt:
3387 		bank1_offset = nvm->flash_bank_size;
3388 		act_offset = E1000_ICH_NVM_SIG_WORD;
3389 
3390 		/* set bank to 0 in case flash read fails */
3391 		*bank = 0;
3392 
3393 		/* Check bank 0 */
3394 		ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
3395 							 &nvm_dword);
3396 		if (ret_val)
3397 			return ret_val;
3398 		sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3399 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3400 		    E1000_ICH_NVM_SIG_VALUE) {
3401 			*bank = 0;
3402 			return E1000_SUCCESS;
3403 		}
3404 
3405 		/* Check bank 1 */
3406 		ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
3407 							 bank1_offset,
3408 							 &nvm_dword);
3409 		if (ret_val)
3410 			return ret_val;
3411 		sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3412 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3413 		    E1000_ICH_NVM_SIG_VALUE) {
3414 			*bank = 1;
3415 			return E1000_SUCCESS;
3416 		}
3417 
3418 		DEBUGOUT("ERROR: No valid NVM bank present\n");
3419 		return -E1000_ERR_NVM;
3420 	case e1000_ich8lan:
3421 	case e1000_ich9lan:
3422 		eecd = E1000_READ_REG(hw, E1000_EECD);
3423 		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3424 		    E1000_EECD_SEC1VAL_VALID_MASK) {
3425 			if (eecd & E1000_EECD_SEC1VAL)
3426 				*bank = 1;
3427 			else
3428 				*bank = 0;
3429 
3430 			return E1000_SUCCESS;
3431 		}
3432 		DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3433 		/* fall-thru */
3434 	default:
3435 		/* set bank to 0 in case flash read fails */
3436 		*bank = 0;
3437 
3438 		/* Check bank 0 */
3439 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3440 							&sig_byte);
3441 		if (ret_val)
3442 			return ret_val;
3443 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3444 		    E1000_ICH_NVM_SIG_VALUE) {
3445 			*bank = 0;
3446 			return E1000_SUCCESS;
3447 		}
3448 
3449 		/* Check bank 1 */
3450 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3451 							bank1_offset,
3452 							&sig_byte);
3453 		if (ret_val)
3454 			return ret_val;
3455 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3456 		    E1000_ICH_NVM_SIG_VALUE) {
3457 			*bank = 1;
3458 			return E1000_SUCCESS;
3459 		}
3460 
3461 		DEBUGOUT("ERROR: No valid NVM bank present\n");
3462 		return -E1000_ERR_NVM;
3463 	}
3464 }
3465 
3466 /**
3467  *  e1000_read_nvm_spt - NVM access for SPT
3468  *  @hw: pointer to the HW structure
3469  *  @offset: The offset (in bytes) of the word(s) to read.
3470  *  @words: Size of data to read in words.
3471  *  @data: pointer to the word(s) to read at offset.
3472  *
3473  *  Reads a word(s) from the NVM
3474  **/
3475 static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
3476 			      u16 *data)
3477 {
3478 	struct e1000_nvm_info *nvm = &hw->nvm;
3479 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3480 	u32 act_offset;
3481 	s32 ret_val = E1000_SUCCESS;
3482 	u32 bank = 0;
3483 	u32 dword = 0;
3484 	u16 offset_to_read;
3485 	u16 i;
3486 
3487 	DEBUGFUNC("e1000_read_nvm_spt");
3488 
3489 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3490 	    (words == 0)) {
3491 		DEBUGOUT("nvm parameter(s) out of bounds\n");
3492 		ret_val = -E1000_ERR_NVM;
3493 		goto out;
3494 	}
3495 
3496 	nvm->ops.acquire(hw);
3497 
3498 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3499 	if (ret_val != E1000_SUCCESS) {
3500 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3501 		bank = 0;
3502 	}
3503 
3504 	act_offset = (bank) ? nvm->flash_bank_size : 0;
3505 	act_offset += offset;
3506 
3507 	ret_val = E1000_SUCCESS;
3508 
3509 	for (i = 0; i < words; i += 2) {
3510 		if (words - i == 1) {
3511 			if (dev_spec->shadow_ram[offset+i].modified) {
3512 				data[i] = dev_spec->shadow_ram[offset+i].value;
3513 			} else {
3514 				offset_to_read = act_offset + i -
3515 						 ((act_offset + i) % 2);
3516 				ret_val =
3517 				   e1000_read_flash_dword_ich8lan(hw,
3518 								 offset_to_read,
3519 								 &dword);
3520 				if (ret_val)
3521 					break;
3522 				if ((act_offset + i) % 2 == 0)
3523 					data[i] = (u16)(dword & 0xFFFF);
3524 				else
3525 					data[i] = (u16)((dword >> 16) & 0xFFFF);
3526 			}
3527 		} else {
3528 			offset_to_read = act_offset + i;
3529 			if (!(dev_spec->shadow_ram[offset+i].modified) ||
3530 			    !(dev_spec->shadow_ram[offset+i+1].modified)) {
3531 				ret_val =
3532 				   e1000_read_flash_dword_ich8lan(hw,
3533 								 offset_to_read,
3534 								 &dword);
3535 				if (ret_val)
3536 					break;
3537 			}
3538 			if (dev_spec->shadow_ram[offset+i].modified)
3539 				data[i] = dev_spec->shadow_ram[offset+i].value;
3540 			else
3541 				data[i] = (u16) (dword & 0xFFFF);
3542 			if (dev_spec->shadow_ram[offset+i].modified)
3543 				data[i+1] =
3544 				   dev_spec->shadow_ram[offset+i+1].value;
3545 			else
3546 				data[i+1] = (u16) (dword >> 16 & 0xFFFF);
3547 		}
3548 	}
3549 
3550 	nvm->ops.release(hw);
3551 
3552 out:
3553 	if (ret_val)
3554 		DEBUGOUT1("NVM read error: %d\n", ret_val);
3555 
3556 	return ret_val;
3557 }
3558 
3559 /**
3560  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3561  *  @hw: pointer to the HW structure
3562  *  @offset: The offset (in bytes) of the word(s) to read.
3563  *  @words: Size of data to read in words
3564  *  @data: Pointer to the word(s) to read at offset.
3565  *
3566  *  Reads a word(s) from the NVM using the flash access registers.
3567  **/
3568 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3569 				  u16 *data)
3570 {
3571 	struct e1000_nvm_info *nvm = &hw->nvm;
3572 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3573 	u32 act_offset;
3574 	s32 ret_val = E1000_SUCCESS;
3575 	u32 bank = 0;
3576 	u16 i, word;
3577 
3578 	DEBUGFUNC("e1000_read_nvm_ich8lan");
3579 
3580 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3581 	    (words == 0)) {
3582 		DEBUGOUT("nvm parameter(s) out of bounds\n");
3583 		ret_val = -E1000_ERR_NVM;
3584 		goto out;
3585 	}
3586 
3587 	nvm->ops.acquire(hw);
3588 
3589 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3590 	if (ret_val != E1000_SUCCESS) {
3591 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3592 		bank = 0;
3593 	}
3594 
3595 	act_offset = (bank) ? nvm->flash_bank_size : 0;
3596 	act_offset += offset;
3597 
3598 	ret_val = E1000_SUCCESS;
3599 	for (i = 0; i < words; i++) {
3600 		if (dev_spec->shadow_ram[offset+i].modified) {
3601 			data[i] = dev_spec->shadow_ram[offset+i].value;
3602 		} else {
3603 			ret_val = e1000_read_flash_word_ich8lan(hw,
3604 								act_offset + i,
3605 								&word);
3606 			if (ret_val)
3607 				break;
3608 			data[i] = word;
3609 		}
3610 	}
3611 
3612 	nvm->ops.release(hw);
3613 
3614 out:
3615 	if (ret_val)
3616 		DEBUGOUT1("NVM read error: %d\n", ret_val);
3617 
3618 	return ret_val;
3619 }
3620 
3621 /**
3622  *  e1000_flash_cycle_init_ich8lan - Initialize flash
3623  *  @hw: pointer to the HW structure
3624  *
3625  *  This function does initial flash setup so that a new read/write/erase cycle
3626  *  can be started.
3627  **/
3628 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3629 {
3630 	union ich8_hws_flash_status hsfsts;
3631 	s32 ret_val = -E1000_ERR_NVM;
3632 
3633 	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3634 
3635 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3636 
3637 	/* Check if the flash descriptor is valid */
3638 	if (!hsfsts.hsf_status.fldesvalid) {
3639 		DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
3640 		return -E1000_ERR_NVM;
3641 	}
3642 
3643 	/* Clear FCERR and DAEL in hw status by writing 1 */
3644 	hsfsts.hsf_status.flcerr = 1;
3645 	hsfsts.hsf_status.dael = 1;
3646 	if (hw->mac.type == e1000_pch_spt)
3647 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3648 				      hsfsts.regval & 0xFFFF);
3649 	else
3650 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3651 
3652 	/* Either we should have a hardware SPI cycle in progress
3653 	 * bit to check against, in order to start a new cycle or
3654 	 * FDONE bit should be changed in the hardware so that it
3655 	 * is 1 after hardware reset, which can then be used as an
3656 	 * indication whether a cycle is in progress or has been
3657 	 * completed.
3658 	 */
3659 
3660 	if (!hsfsts.hsf_status.flcinprog) {
3661 		/* There is no cycle running at present,
3662 		 * so we can start a cycle.
3663 		 * Begin by setting Flash Cycle Done.
3664 		 */
3665 		hsfsts.hsf_status.flcdone = 1;
3666 		if (hw->mac.type == e1000_pch_spt)
3667 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3668 					      hsfsts.regval & 0xFFFF);
3669 		else
3670 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3671 						hsfsts.regval);
3672 		ret_val = E1000_SUCCESS;
3673 	} else {
3674 		s32 i;
3675 
3676 		/* Otherwise poll for sometime so the current
3677 		 * cycle has a chance to end before giving up.
3678 		 */
3679 		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3680 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3681 							      ICH_FLASH_HSFSTS);
3682 			if (!hsfsts.hsf_status.flcinprog) {
3683 				ret_val = E1000_SUCCESS;
3684 				break;
3685 			}
3686 			usec_delay(1);
3687 		}
3688 		if (ret_val == E1000_SUCCESS) {
3689 			/* Successful in waiting for previous cycle to timeout,
3690 			 * now set the Flash Cycle Done.
3691 			 */
3692 			hsfsts.hsf_status.flcdone = 1;
3693 			if (hw->mac.type == e1000_pch_spt)
3694 				E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3695 						      hsfsts.regval & 0xFFFF);
3696 			else
3697 				E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3698 							hsfsts.regval);
3699 		} else {
3700 			DEBUGOUT("Flash controller busy, cannot get access\n");
3701 		}
3702 	}
3703 
3704 	return ret_val;
3705 }
3706 
3707 /**
3708  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3709  *  @hw: pointer to the HW structure
3710  *  @timeout: maximum time to wait for completion
3711  *
3712  *  This function starts a flash cycle and waits for its completion.
3713  **/
3714 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3715 {
3716 	union ich8_hws_flash_ctrl hsflctl;
3717 	union ich8_hws_flash_status hsfsts;
3718 	u32 i = 0;
3719 
3720 	DEBUGFUNC("e1000_flash_cycle_ich8lan");
3721 
3722 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3723 	if (hw->mac.type == e1000_pch_spt)
3724 		hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3725 	else
3726 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3727 	hsflctl.hsf_ctrl.flcgo = 1;
3728 
3729 	if (hw->mac.type == e1000_pch_spt)
3730 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3731 				      hsflctl.regval << 16);
3732 	else
3733 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3734 
3735 	/* wait till FDONE bit is set to 1 */
3736 	do {
3737 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3738 		if (hsfsts.hsf_status.flcdone)
3739 			break;
3740 		usec_delay(1);
3741 	} while (i++ < timeout);
3742 
3743 	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3744 		return E1000_SUCCESS;
3745 
3746 	return -E1000_ERR_NVM;
3747 }
3748 
3749 /**
3750  *  e1000_read_flash_dword_ich8lan - Read dword from flash
3751  *  @hw: pointer to the HW structure
3752  *  @offset: offset to data location
3753  *  @data: pointer to the location for storing the data
3754  *
3755  *  Reads the flash dword at offset into data.  Offset is converted
3756  *  to bytes before read.
3757  **/
3758 static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
3759 					  u32 *data)
3760 {
3761 	DEBUGFUNC("e1000_read_flash_dword_ich8lan");
3762 
3763 	if (!data)
3764 		return -E1000_ERR_NVM;
3765 
3766 	/* Must convert word offset into bytes. */
3767 	offset <<= 1;
3768 
3769 	return e1000_read_flash_data32_ich8lan(hw, offset, data);
3770 }
3771 
3772 /**
3773  *  e1000_read_flash_word_ich8lan - Read word from flash
3774  *  @hw: pointer to the HW structure
3775  *  @offset: offset to data location
3776  *  @data: pointer to the location for storing the data
3777  *
3778  *  Reads the flash word at offset into data.  Offset is converted
3779  *  to bytes before read.
3780  **/
3781 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3782 					 u16 *data)
3783 {
3784 	DEBUGFUNC("e1000_read_flash_word_ich8lan");
3785 
3786 	if (!data)
3787 		return -E1000_ERR_NVM;
3788 
3789 	/* Must convert offset into bytes. */
3790 	offset <<= 1;
3791 
3792 	return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3793 }
3794 
3795 /**
3796  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3797  *  @hw: pointer to the HW structure
3798  *  @offset: The offset of the byte to read.
3799  *  @data: Pointer to a byte to store the value read.
3800  *
3801  *  Reads a single byte from the NVM using the flash access registers.
3802  **/
3803 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3804 					 u8 *data)
3805 {
3806 	s32 ret_val;
3807 	u16 word = 0;
3808 
3809 	/* In SPT, only 32 bits access is supported,
3810 	 * so this function should not be called.
3811 	 */
3812 	if (hw->mac.type == e1000_pch_spt)
3813 		return -E1000_ERR_NVM;
3814 	else
3815 		ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3816 
3817 	if (ret_val)
3818 		return ret_val;
3819 
3820 	*data = (u8)word;
3821 
3822 	return E1000_SUCCESS;
3823 }
3824 
3825 /**
3826  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3827  *  @hw: pointer to the HW structure
3828  *  @offset: The offset (in bytes) of the byte or word to read.
3829  *  @size: Size of data to read, 1=byte 2=word
3830  *  @data: Pointer to the word to store the value read.
3831  *
3832  *  Reads a byte or word from the NVM using the flash access registers.
3833  **/
3834 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3835 					 u8 size, u16 *data)
3836 {
3837 	union ich8_hws_flash_status hsfsts;
3838 	union ich8_hws_flash_ctrl hsflctl;
3839 	u32 flash_linear_addr;
3840 	u32 flash_data = 0;
3841 	s32 ret_val = -E1000_ERR_NVM;
3842 	u8 count = 0;
3843 
3844 	DEBUGFUNC("e1000_read_flash_data_ich8lan");
3845 
3846 	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3847 		return -E1000_ERR_NVM;
3848 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3849 			     hw->nvm.flash_base_addr);
3850 
3851 	do {
3852 		usec_delay(1);
3853 		/* Steps */
3854 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3855 		if (ret_val != E1000_SUCCESS)
3856 			break;
3857 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3858 
3859 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3860 		hsflctl.hsf_ctrl.fldbcount = size - 1;
3861 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3862 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3863 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3864 
3865 		ret_val = e1000_flash_cycle_ich8lan(hw,
3866 						ICH_FLASH_READ_COMMAND_TIMEOUT);
3867 
3868 		/* Check if FCERR is set to 1, if set to 1, clear it
3869 		 * and try the whole sequence a few more times, else
3870 		 * read in (shift in) the Flash Data0, the order is
3871 		 * least significant byte first msb to lsb
3872 		 */
3873 		if (ret_val == E1000_SUCCESS) {
3874 			flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3875 			if (size == 1)
3876 				*data = (u8)(flash_data & 0x000000FF);
3877 			else if (size == 2)
3878 				*data = (u16)(flash_data & 0x0000FFFF);
3879 			break;
3880 		} else {
3881 			/* If we've gotten here, then things are probably
3882 			 * completely hosed, but if the error condition is
3883 			 * detected, it won't hurt to give it another try...
3884 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3885 			 */
3886 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3887 							      ICH_FLASH_HSFSTS);
3888 			if (hsfsts.hsf_status.flcerr) {
3889 				/* Repeat for some time before giving up. */
3890 				continue;
3891 			} else if (!hsfsts.hsf_status.flcdone) {
3892 				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3893 				break;
3894 			}
3895 		}
3896 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3897 
3898 	return ret_val;
3899 }
3900 
3901 /**
3902  *  e1000_read_flash_data32_ich8lan - Read dword from NVM
3903  *  @hw: pointer to the HW structure
3904  *  @offset: The offset (in bytes) of the dword to read.
3905  *  @data: Pointer to the dword to store the value read.
3906  *
3907  *  Reads a byte or word from the NVM using the flash access registers.
3908  **/
3909 static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
3910 					   u32 *data)
3911 {
3912 	union ich8_hws_flash_status hsfsts;
3913 	union ich8_hws_flash_ctrl hsflctl;
3914 	u32 flash_linear_addr;
3915 	s32 ret_val = -E1000_ERR_NVM;
3916 	u8 count = 0;
3917 
3918 	DEBUGFUNC("e1000_read_flash_data_ich8lan");
3919 
3920 		if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
3921 		    hw->mac.type != e1000_pch_spt)
3922 			return -E1000_ERR_NVM;
3923 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3924 			     hw->nvm.flash_base_addr);
3925 
3926 	do {
3927 		usec_delay(1);
3928 		/* Steps */
3929 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3930 		if (ret_val != E1000_SUCCESS)
3931 			break;
3932 		/* In SPT, This register is in Lan memory space, not flash.
3933 		 * Therefore, only 32 bit access is supported
3934 		 */
3935 		hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3936 
3937 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3938 		hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
3939 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3940 		/* In SPT, This register is in Lan memory space, not flash.
3941 		 * Therefore, only 32 bit access is supported
3942 		 */
3943 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3944 				      (u32)hsflctl.regval << 16);
3945 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3946 
3947 		ret_val = e1000_flash_cycle_ich8lan(hw,
3948 						ICH_FLASH_READ_COMMAND_TIMEOUT);
3949 
3950 		/* Check if FCERR is set to 1, if set to 1, clear it
3951 		 * and try the whole sequence a few more times, else
3952 		 * read in (shift in) the Flash Data0, the order is
3953 		 * least significant byte first msb to lsb
3954 		 */
3955 		if (ret_val == E1000_SUCCESS) {
3956 			*data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3957 			break;
3958 		} else {
3959 			/* If we've gotten here, then things are probably
3960 			 * completely hosed, but if the error condition is
3961 			 * detected, it won't hurt to give it another try...
3962 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3963 			 */
3964 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3965 							      ICH_FLASH_HSFSTS);
3966 			if (hsfsts.hsf_status.flcerr) {
3967 				/* Repeat for some time before giving up. */
3968 				continue;
3969 			} else if (!hsfsts.hsf_status.flcdone) {
3970 				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3971 				break;
3972 			}
3973 		}
3974 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3975 
3976 	return ret_val;
3977 }
3978 
3979 /**
3980  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3981  *  @hw: pointer to the HW structure
3982  *  @offset: The offset (in bytes) of the word(s) to write.
3983  *  @words: Size of data to write in words
3984  *  @data: Pointer to the word(s) to write at offset.
3985  *
3986  *  Writes a byte or word to the NVM using the flash access registers.
3987  **/
3988 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3989 				   u16 *data)
3990 {
3991 	struct e1000_nvm_info *nvm = &hw->nvm;
3992 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3993 	u16 i;
3994 
3995 	DEBUGFUNC("e1000_write_nvm_ich8lan");
3996 
3997 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3998 	    (words == 0)) {
3999 		DEBUGOUT("nvm parameter(s) out of bounds\n");
4000 		return -E1000_ERR_NVM;
4001 	}
4002 
4003 	nvm->ops.acquire(hw);
4004 
4005 	for (i = 0; i < words; i++) {
4006 		dev_spec->shadow_ram[offset+i].modified = TRUE;
4007 		dev_spec->shadow_ram[offset+i].value = data[i];
4008 	}
4009 
4010 	nvm->ops.release(hw);
4011 
4012 	return E1000_SUCCESS;
4013 }
4014 
4015 /**
4016  *  e1000_update_nvm_checksum_spt - Update the checksum for NVM
4017  *  @hw: pointer to the HW structure
4018  *
4019  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
4020  *  which writes the checksum to the shadow ram.  The changes in the shadow
4021  *  ram are then committed to the EEPROM by processing each bank at a time
4022  *  checking for the modified bit and writing only the pending changes.
4023  *  After a successful commit, the shadow ram is cleared and is ready for
4024  *  future writes.
4025  **/
4026 static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
4027 {
4028 	struct e1000_nvm_info *nvm = &hw->nvm;
4029 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4030 	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4031 	s32 ret_val;
4032 	u32 dword = 0;
4033 
4034 	DEBUGFUNC("e1000_update_nvm_checksum_spt");
4035 
4036 	ret_val = e1000_update_nvm_checksum_generic(hw);
4037 	if (ret_val)
4038 		goto out;
4039 
4040 	if (nvm->type != e1000_nvm_flash_sw)
4041 		goto out;
4042 
4043 	nvm->ops.acquire(hw);
4044 
4045 	/* We're writing to the opposite bank so if we're on bank 1,
4046 	 * write to bank 0 etc.  We also need to erase the segment that
4047 	 * is going to be written
4048 	 */
4049 	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4050 	if (ret_val != E1000_SUCCESS) {
4051 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4052 		bank = 0;
4053 	}
4054 
4055 	if (bank == 0) {
4056 		new_bank_offset = nvm->flash_bank_size;
4057 		old_bank_offset = 0;
4058 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4059 		if (ret_val)
4060 			goto release;
4061 	} else {
4062 		old_bank_offset = nvm->flash_bank_size;
4063 		new_bank_offset = 0;
4064 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4065 		if (ret_val)
4066 			goto release;
4067 	}
4068 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i += 2) {
4069 		/* Determine whether to write the value stored
4070 		 * in the other NVM bank or a modified value stored
4071 		 * in the shadow RAM
4072 		 */
4073 		ret_val = e1000_read_flash_dword_ich8lan(hw,
4074 							 i + old_bank_offset,
4075 							 &dword);
4076 
4077 		if (dev_spec->shadow_ram[i].modified) {
4078 			dword &= 0xffff0000;
4079 			dword |= (dev_spec->shadow_ram[i].value & 0xffff);
4080 		}
4081 		if (dev_spec->shadow_ram[i + 1].modified) {
4082 			dword &= 0x0000ffff;
4083 			dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
4084 				  << 16);
4085 		}
4086 		if (ret_val)
4087 			break;
4088 
4089 		/* If the word is 0x13, then make sure the signature bits
4090 		 * (15:14) are 11b until the commit has completed.
4091 		 * This will allow us to write 10b which indicates the
4092 		 * signature is valid.  We want to do this after the write
4093 		 * has completed so that we don't mark the segment valid
4094 		 * while the write is still in progress
4095 		 */
4096 		if (i == E1000_ICH_NVM_SIG_WORD - 1)
4097 			dword |= E1000_ICH_NVM_SIG_MASK << 16;
4098 
4099 		/* Convert offset to bytes. */
4100 		act_offset = (i + new_bank_offset) << 1;
4101 
4102 		usec_delay(100);
4103 
4104 		/* Write the data to the new bank. Offset in words*/
4105 		act_offset = i + new_bank_offset;
4106 		ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
4107 								dword);
4108 		if (ret_val)
4109 			break;
4110 	 }
4111 
4112 	/* Don't bother writing the segment valid bits if sector
4113 	 * programming failed.
4114 	 */
4115 	if (ret_val) {
4116 		DEBUGOUT("Flash commit failed.\n");
4117 		goto release;
4118 	}
4119 
4120 	/* Finally validate the new segment by setting bit 15:14
4121 	 * to 10b in word 0x13 , this can be done without an
4122 	 * erase as well since these bits are 11 to start with
4123 	 * and we need to change bit 14 to 0b
4124 	 */
4125 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4126 
4127 	/*offset in words but we read dword*/
4128 	--act_offset;
4129 	ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4130 
4131 	if (ret_val)
4132 		goto release;
4133 
4134 	dword &= 0xBFFFFFFF;
4135 	ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4136 
4137 	if (ret_val)
4138 		goto release;
4139 
4140 	/* And invalidate the previously valid segment by setting
4141 	 * its signature word (0x13) high_byte to 0b. This can be
4142 	 * done without an erase because flash erase sets all bits
4143 	 * to 1's. We can write 1's to 0's without an erase
4144 	 */
4145 	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4146 
4147 	/* offset in words but we read dword*/
4148 	act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
4149 	ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4150 
4151 	if (ret_val)
4152 		goto release;
4153 
4154 	dword &= 0x00FFFFFF;
4155 	ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4156 
4157 	if (ret_val)
4158 		goto release;
4159 
4160 	/* Great!  Everything worked, we can now clear the cached entries. */
4161 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4162 		dev_spec->shadow_ram[i].modified = FALSE;
4163 		dev_spec->shadow_ram[i].value = 0xFFFF;
4164 	}
4165 
4166 release:
4167 	nvm->ops.release(hw);
4168 
4169 	/* Reload the EEPROM, or else modifications will not appear
4170 	 * until after the next adapter reset.
4171 	 */
4172 	if (!ret_val) {
4173 		nvm->ops.reload(hw);
4174 		msec_delay(10);
4175 	}
4176 
4177 out:
4178 	if (ret_val)
4179 		DEBUGOUT1("NVM update error: %d\n", ret_val);
4180 
4181 	return ret_val;
4182 }
4183 
4184 /**
4185  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
4186  *  @hw: pointer to the HW structure
4187  *
4188  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
4189  *  which writes the checksum to the shadow ram.  The changes in the shadow
4190  *  ram are then committed to the EEPROM by processing each bank at a time
4191  *  checking for the modified bit and writing only the pending changes.
4192  *  After a successful commit, the shadow ram is cleared and is ready for
4193  *  future writes.
4194  **/
4195 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
4196 {
4197 	struct e1000_nvm_info *nvm = &hw->nvm;
4198 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4199 	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4200 	s32 ret_val;
4201 	u16 data = 0;
4202 
4203 	DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
4204 
4205 	ret_val = e1000_update_nvm_checksum_generic(hw);
4206 	if (ret_val)
4207 		goto out;
4208 
4209 	if (nvm->type != e1000_nvm_flash_sw)
4210 		goto out;
4211 
4212 	nvm->ops.acquire(hw);
4213 
4214 	/* We're writing to the opposite bank so if we're on bank 1,
4215 	 * write to bank 0 etc.  We also need to erase the segment that
4216 	 * is going to be written
4217 	 */
4218 	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4219 	if (ret_val != E1000_SUCCESS) {
4220 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4221 		bank = 0;
4222 	}
4223 
4224 	if (bank == 0) {
4225 		new_bank_offset = nvm->flash_bank_size;
4226 		old_bank_offset = 0;
4227 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4228 		if (ret_val)
4229 			goto release;
4230 	} else {
4231 		old_bank_offset = nvm->flash_bank_size;
4232 		new_bank_offset = 0;
4233 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4234 		if (ret_val)
4235 			goto release;
4236 	}
4237 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4238 		if (dev_spec->shadow_ram[i].modified) {
4239 			data = dev_spec->shadow_ram[i].value;
4240 		} else {
4241 			ret_val = e1000_read_flash_word_ich8lan(hw, i +
4242 								old_bank_offset,
4243 								&data);
4244 			if (ret_val)
4245 				break;
4246 		}
4247 		/* If the word is 0x13, then make sure the signature bits
4248 		 * (15:14) are 11b until the commit has completed.
4249 		 * This will allow us to write 10b which indicates the
4250 		 * signature is valid.  We want to do this after the write
4251 		 * has completed so that we don't mark the segment valid
4252 		 * while the write is still in progress
4253 		 */
4254 		if (i == E1000_ICH_NVM_SIG_WORD)
4255 			data |= E1000_ICH_NVM_SIG_MASK;
4256 
4257 		/* Convert offset to bytes. */
4258 		act_offset = (i + new_bank_offset) << 1;
4259 
4260 		usec_delay(100);
4261 
4262 		/* Write the bytes to the new bank. */
4263 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4264 							       act_offset,
4265 							       (u8)data);
4266 		if (ret_val)
4267 			break;
4268 
4269 		usec_delay(100);
4270 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4271 							  act_offset + 1,
4272 							  (u8)(data >> 8));
4273 		if (ret_val)
4274 			break;
4275 	 }
4276 
4277 	/* Don't bother writing the segment valid bits if sector
4278 	 * programming failed.
4279 	 */
4280 	if (ret_val) {
4281 		DEBUGOUT("Flash commit failed.\n");
4282 		goto release;
4283 	}
4284 
4285 	/* Finally validate the new segment by setting bit 15:14
4286 	 * to 10b in word 0x13 , this can be done without an
4287 	 * erase as well since these bits are 11 to start with
4288 	 * and we need to change bit 14 to 0b
4289 	 */
4290 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4291 	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
4292 	if (ret_val)
4293 		goto release;
4294 
4295 	data &= 0xBFFF;
4296 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1,
4297 						       (u8)(data >> 8));
4298 	if (ret_val)
4299 		goto release;
4300 
4301 	/* And invalidate the previously valid segment by setting
4302 	 * its signature word (0x13) high_byte to 0b. This can be
4303 	 * done without an erase because flash erase sets all bits
4304 	 * to 1's. We can write 1's to 0's without an erase
4305 	 */
4306 	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4307 
4308 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
4309 
4310 	if (ret_val)
4311 		goto release;
4312 
4313 	/* Great!  Everything worked, we can now clear the cached entries. */
4314 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4315 		dev_spec->shadow_ram[i].modified = FALSE;
4316 		dev_spec->shadow_ram[i].value = 0xFFFF;
4317 	}
4318 
4319 release:
4320 	nvm->ops.release(hw);
4321 
4322 	/* Reload the EEPROM, or else modifications will not appear
4323 	 * until after the next adapter reset.
4324 	 */
4325 	if (!ret_val) {
4326 		nvm->ops.reload(hw);
4327 		msec_delay(10);
4328 	}
4329 
4330 out:
4331 	if (ret_val)
4332 		DEBUGOUT1("NVM update error: %d\n", ret_val);
4333 
4334 	return ret_val;
4335 }
4336 
4337 /**
4338  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
4339  *  @hw: pointer to the HW structure
4340  *
4341  *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
4342  *  If the bit is 0, that the EEPROM had been modified, but the checksum was not
4343  *  calculated, in which case we need to calculate the checksum and set bit 6.
4344  **/
4345 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4346 {
4347 	s32 ret_val;
4348 	u16 data;
4349 	u16 word;
4350 	u16 valid_csum_mask;
4351 
4352 	DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
4353 
4354 	/* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
4355 	 * the checksum needs to be fixed.  This bit is an indication that
4356 	 * the NVM was prepared by OEM software and did not calculate
4357 	 * the checksum...a likely scenario.
4358 	 */
4359 	switch (hw->mac.type) {
4360 	case e1000_pch_lpt:
4361 	case e1000_pch_spt:
4362 		word = NVM_COMPAT;
4363 		valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4364 		break;
4365 	default:
4366 		word = NVM_FUTURE_INIT_WORD1;
4367 		valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4368 		break;
4369 	}
4370 
4371 	ret_val = hw->nvm.ops.read(hw, word, 1, &data);
4372 	if (ret_val)
4373 		return ret_val;
4374 
4375 	if (!(data & valid_csum_mask)) {
4376 		data |= valid_csum_mask;
4377 		ret_val = hw->nvm.ops.write(hw, word, 1, &data);
4378 		if (ret_val)
4379 			return ret_val;
4380 		ret_val = hw->nvm.ops.update(hw);
4381 		if (ret_val)
4382 			return ret_val;
4383 	}
4384 
4385 	return e1000_validate_nvm_checksum_generic(hw);
4386 }
4387 
4388 /**
4389  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4390  *  @hw: pointer to the HW structure
4391  *  @offset: The offset (in bytes) of the byte/word to read.
4392  *  @size: Size of data to read, 1=byte 2=word
4393  *  @data: The byte(s) to write to the NVM.
4394  *
4395  *  Writes one/two bytes to the NVM using the flash access registers.
4396  **/
4397 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4398 					  u8 size, u16 data)
4399 {
4400 	union ich8_hws_flash_status hsfsts;
4401 	union ich8_hws_flash_ctrl hsflctl;
4402 	u32 flash_linear_addr;
4403 	u32 flash_data = 0;
4404 	s32 ret_val;
4405 	u8 count = 0;
4406 
4407 	DEBUGFUNC("e1000_write_ich8_data");
4408 
4409 	if (hw->mac.type == e1000_pch_spt) {
4410 		if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4411 			return -E1000_ERR_NVM;
4412 	} else {
4413 		if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4414 			return -E1000_ERR_NVM;
4415 	}
4416 
4417 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4418 			     hw->nvm.flash_base_addr);
4419 
4420 	do {
4421 		usec_delay(1);
4422 		/* Steps */
4423 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
4424 		if (ret_val != E1000_SUCCESS)
4425 			break;
4426 		/* In SPT, This register is in Lan memory space, not
4427 		 * flash.  Therefore, only 32 bit access is supported
4428 		 */
4429 		if (hw->mac.type == e1000_pch_spt)
4430 			hsflctl.regval =
4431 			    E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
4432 		else
4433 			hsflctl.regval =
4434 			    E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4435 
4436 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
4437 		hsflctl.hsf_ctrl.fldbcount = size - 1;
4438 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4439 		/* In SPT, This register is in Lan memory space,
4440 		 * not flash.  Therefore, only 32 bit access is
4441 		 * supported
4442 		 */
4443 		if (hw->mac.type == e1000_pch_spt)
4444 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4445 					      hsflctl.regval << 16);
4446 		else
4447 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4448 						hsflctl.regval);
4449 
4450 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4451 
4452 		if (size == 1)
4453 			flash_data = (u32)data & 0x00FF;
4454 		else
4455 			flash_data = (u32)data;
4456 
4457 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
4458 
4459 		/* check if FCERR is set to 1 , if set to 1, clear it
4460 		 * and try the whole sequence a few more times else done
4461 		 */
4462 		ret_val =
4463 		    e1000_flash_cycle_ich8lan(hw,
4464 					      ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4465 		if (ret_val == E1000_SUCCESS)
4466 			break;
4467 
4468 		/* If we're here, then things are most likely
4469 		 * completely hosed, but if the error condition
4470 		 * is detected, it won't hurt to give it another
4471 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4472 		 */
4473 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4474 		if (hsfsts.hsf_status.flcerr)
4475 			/* Repeat for some time before giving up. */
4476 			continue;
4477 		if (!hsfsts.hsf_status.flcdone) {
4478 			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4479 			break;
4480 		}
4481 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4482 
4483 	return ret_val;
4484 }
4485 
4486 /**
4487 *  e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
4488 *  @hw: pointer to the HW structure
4489 *  @offset: The offset (in bytes) of the dwords to read.
4490 *  @data: The 4 bytes to write to the NVM.
4491 *
4492 *  Writes one/two/four bytes to the NVM using the flash access registers.
4493 **/
4494 static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4495 					    u32 data)
4496 {
4497 	union ich8_hws_flash_status hsfsts;
4498 	union ich8_hws_flash_ctrl hsflctl;
4499 	u32 flash_linear_addr;
4500 	s32 ret_val;
4501 	u8 count = 0;
4502 
4503 	DEBUGFUNC("e1000_write_flash_data32_ich8lan");
4504 
4505 	if (hw->mac.type == e1000_pch_spt) {
4506 		if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
4507 			return -E1000_ERR_NVM;
4508 	}
4509 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4510 			     hw->nvm.flash_base_addr);
4511 	do {
4512 		usec_delay(1);
4513 		/* Steps */
4514 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
4515 		if (ret_val != E1000_SUCCESS)
4516 			break;
4517 
4518 		/* In SPT, This register is in Lan memory space, not
4519 		 * flash.  Therefore, only 32 bit access is supported
4520 		 */
4521 		if (hw->mac.type == e1000_pch_spt)
4522 			hsflctl.regval = E1000_READ_FLASH_REG(hw,
4523 							      ICH_FLASH_HSFSTS)
4524 					 >> 16;
4525 		else
4526 			hsflctl.regval = E1000_READ_FLASH_REG16(hw,
4527 							      ICH_FLASH_HSFCTL);
4528 
4529 		hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
4530 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4531 
4532 		/* In SPT, This register is in Lan memory space,
4533 		 * not flash.  Therefore, only 32 bit access is
4534 		 * supported
4535 		 */
4536 		if (hw->mac.type == e1000_pch_spt)
4537 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4538 					      hsflctl.regval << 16);
4539 		else
4540 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4541 						hsflctl.regval);
4542 
4543 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4544 
4545 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);
4546 
4547 		/* check if FCERR is set to 1 , if set to 1, clear it
4548 		 * and try the whole sequence a few more times else done
4549 		 */
4550 		ret_val = e1000_flash_cycle_ich8lan(hw,
4551 					       ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4552 
4553 		if (ret_val == E1000_SUCCESS)
4554 			break;
4555 
4556 		/* If we're here, then things are most likely
4557 		 * completely hosed, but if the error condition
4558 		 * is detected, it won't hurt to give it another
4559 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4560 		 */
4561 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4562 
4563 		if (hsfsts.hsf_status.flcerr)
4564 			/* Repeat for some time before giving up. */
4565 			continue;
4566 		if (!hsfsts.hsf_status.flcdone) {
4567 			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4568 			break;
4569 		}
4570 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4571 
4572 	return ret_val;
4573 }
4574 
4575 /**
4576  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
4577  *  @hw: pointer to the HW structure
4578  *  @offset: The index of the byte to read.
4579  *  @data: The byte to write to the NVM.
4580  *
4581  *  Writes a single byte to the NVM using the flash access registers.
4582  **/
4583 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
4584 					  u8 data)
4585 {
4586 	u16 word = (u16)data;
4587 
4588 	DEBUGFUNC("e1000_write_flash_byte_ich8lan");
4589 
4590 	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4591 }
4592 
4593 /**
4594 *  e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
4595 *  @hw: pointer to the HW structure
4596 *  @offset: The offset of the word to write.
4597 *  @dword: The dword to write to the NVM.
4598 *
4599 *  Writes a single dword to the NVM using the flash access registers.
4600 *  Goes through a retry algorithm before giving up.
4601 **/
4602 static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
4603 						 u32 offset, u32 dword)
4604 {
4605 	s32 ret_val;
4606 	u16 program_retries;
4607 
4608 	DEBUGFUNC("e1000_retry_write_flash_dword_ich8lan");
4609 
4610 	/* Must convert word offset into bytes. */
4611 	offset <<= 1;
4612 
4613 	ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4614 
4615 	if (!ret_val)
4616 		return ret_val;
4617 	for (program_retries = 0; program_retries < 100; program_retries++) {
4618 		DEBUGOUT2("Retrying Byte %8.8X at offset %u\n", dword, offset);
4619 		usec_delay(100);
4620 		ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4621 		if (ret_val == E1000_SUCCESS)
4622 			break;
4623 	}
4624 	if (program_retries == 100)
4625 		return -E1000_ERR_NVM;
4626 
4627 	return E1000_SUCCESS;
4628 }
4629 
4630 /**
4631  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
4632  *  @hw: pointer to the HW structure
4633  *  @offset: The offset of the byte to write.
4634  *  @byte: The byte to write to the NVM.
4635  *
4636  *  Writes a single byte to the NVM using the flash access registers.
4637  *  Goes through a retry algorithm before giving up.
4638  **/
4639 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
4640 						u32 offset, u8 byte)
4641 {
4642 	s32 ret_val;
4643 	u16 program_retries;
4644 
4645 	DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
4646 
4647 	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4648 	if (!ret_val)
4649 		return ret_val;
4650 
4651 	for (program_retries = 0; program_retries < 100; program_retries++) {
4652 		DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
4653 		usec_delay(100);
4654 		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4655 		if (ret_val == E1000_SUCCESS)
4656 			break;
4657 	}
4658 	if (program_retries == 100)
4659 		return -E1000_ERR_NVM;
4660 
4661 	return E1000_SUCCESS;
4662 }
4663 
4664 /**
4665  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
4666  *  @hw: pointer to the HW structure
4667  *  @bank: 0 for first bank, 1 for second bank, etc.
4668  *
4669  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
4670  *  bank N is 4096 * N + flash_reg_addr.
4671  **/
4672 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
4673 {
4674 	struct e1000_nvm_info *nvm = &hw->nvm;
4675 	union ich8_hws_flash_status hsfsts;
4676 	union ich8_hws_flash_ctrl hsflctl;
4677 	u32 flash_linear_addr;
4678 	/* bank size is in 16bit words - adjust to bytes */
4679 	u32 flash_bank_size = nvm->flash_bank_size * 2;
4680 	s32 ret_val;
4681 	s32 count = 0;
4682 	s32 j, iteration, sector_size;
4683 
4684 	DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
4685 
4686 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4687 
4688 	/* Determine HW Sector size: Read BERASE bits of hw flash status
4689 	 * register
4690 	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
4691 	 *     consecutive sectors.  The start index for the nth Hw sector
4692 	 *     can be calculated as = bank * 4096 + n * 256
4693 	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
4694 	 *     The start index for the nth Hw sector can be calculated
4695 	 *     as = bank * 4096
4696 	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
4697 	 *     (ich9 only, otherwise error condition)
4698 	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
4699 	 */
4700 	switch (hsfsts.hsf_status.berasesz) {
4701 	case 0:
4702 		/* Hw sector size 256 */
4703 		sector_size = ICH_FLASH_SEG_SIZE_256;
4704 		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4705 		break;
4706 	case 1:
4707 		sector_size = ICH_FLASH_SEG_SIZE_4K;
4708 		iteration = 1;
4709 		break;
4710 	case 2:
4711 		sector_size = ICH_FLASH_SEG_SIZE_8K;
4712 		iteration = 1;
4713 		break;
4714 	case 3:
4715 		sector_size = ICH_FLASH_SEG_SIZE_64K;
4716 		iteration = 1;
4717 		break;
4718 	default:
4719 		return -E1000_ERR_NVM;
4720 	}
4721 
4722 	/* Start with the base address, then add the sector offset. */
4723 	flash_linear_addr = hw->nvm.flash_base_addr;
4724 	flash_linear_addr += (bank) ? flash_bank_size : 0;
4725 
4726 	for (j = 0; j < iteration; j++) {
4727 		do {
4728 			u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4729 
4730 			/* Steps */
4731 			ret_val = e1000_flash_cycle_init_ich8lan(hw);
4732 			if (ret_val)
4733 				return ret_val;
4734 
4735 			/* Write a value 11 (block Erase) in Flash
4736 			 * Cycle field in hw flash control
4737 			 */
4738 			if (hw->mac.type == e1000_pch_spt)
4739 				hsflctl.regval =
4740 				    E1000_READ_FLASH_REG(hw,
4741 							 ICH_FLASH_HSFSTS)>>16;
4742 			else
4743 				hsflctl.regval =
4744 				    E1000_READ_FLASH_REG16(hw,
4745 							   ICH_FLASH_HSFCTL);
4746 
4747 			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4748 			if (hw->mac.type == e1000_pch_spt)
4749 				E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4750 						      hsflctl.regval << 16);
4751 			else
4752 				E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4753 							hsflctl.regval);
4754 
4755 			/* Write the last 24 bits of an index within the
4756 			 * block into Flash Linear address field in Flash
4757 			 * Address.
4758 			 */
4759 			flash_linear_addr += (j * sector_size);
4760 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
4761 					      flash_linear_addr);
4762 
4763 			ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4764 			if (ret_val == E1000_SUCCESS)
4765 				break;
4766 
4767 			/* Check if FCERR is set to 1.  If 1,
4768 			 * clear it and try the whole sequence
4769 			 * a few more times else Done
4770 			 */
4771 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4772 						      ICH_FLASH_HSFSTS);
4773 			if (hsfsts.hsf_status.flcerr)
4774 				/* repeat for some time before giving up */
4775 				continue;
4776 			else if (!hsfsts.hsf_status.flcdone)
4777 				return ret_val;
4778 		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4779 	}
4780 
4781 	return E1000_SUCCESS;
4782 }
4783 
4784 /**
4785  *  e1000_valid_led_default_ich8lan - Set the default LED settings
4786  *  @hw: pointer to the HW structure
4787  *  @data: Pointer to the LED settings
4788  *
4789  *  Reads the LED default settings from the NVM to data.  If the NVM LED
4790  *  settings is all 0's or F's, set the LED default to a valid LED default
4791  *  setting.
4792  **/
4793 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4794 {
4795 	s32 ret_val;
4796 
4797 	DEBUGFUNC("e1000_valid_led_default_ich8lan");
4798 
4799 	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4800 	if (ret_val) {
4801 		DEBUGOUT("NVM Read Error\n");
4802 		return ret_val;
4803 	}
4804 
4805 	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4806 		*data = ID_LED_DEFAULT_ICH8LAN;
4807 
4808 	return E1000_SUCCESS;
4809 }
4810 
4811 /**
4812  *  e1000_id_led_init_pchlan - store LED configurations
4813  *  @hw: pointer to the HW structure
4814  *
4815  *  PCH does not control LEDs via the LEDCTL register, rather it uses
4816  *  the PHY LED configuration register.
4817  *
4818  *  PCH also does not have an "always on" or "always off" mode which
4819  *  complicates the ID feature.  Instead of using the "on" mode to indicate
4820  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4821  *  use "link_up" mode.  The LEDs will still ID on request if there is no
4822  *  link based on logic in e1000_led_[on|off]_pchlan().
4823  **/
4824 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4825 {
4826 	struct e1000_mac_info *mac = &hw->mac;
4827 	s32 ret_val;
4828 	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4829 	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4830 	u16 data, i, temp, shift;
4831 
4832 	DEBUGFUNC("e1000_id_led_init_pchlan");
4833 
4834 	/* Get default ID LED modes */
4835 	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4836 	if (ret_val)
4837 		return ret_val;
4838 
4839 	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4840 	mac->ledctl_mode1 = mac->ledctl_default;
4841 	mac->ledctl_mode2 = mac->ledctl_default;
4842 
4843 	for (i = 0; i < 4; i++) {
4844 		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4845 		shift = (i * 5);
4846 		switch (temp) {
4847 		case ID_LED_ON1_DEF2:
4848 		case ID_LED_ON1_ON2:
4849 		case ID_LED_ON1_OFF2:
4850 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4851 			mac->ledctl_mode1 |= (ledctl_on << shift);
4852 			break;
4853 		case ID_LED_OFF1_DEF2:
4854 		case ID_LED_OFF1_ON2:
4855 		case ID_LED_OFF1_OFF2:
4856 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4857 			mac->ledctl_mode1 |= (ledctl_off << shift);
4858 			break;
4859 		default:
4860 			/* Do nothing */
4861 			break;
4862 		}
4863 		switch (temp) {
4864 		case ID_LED_DEF1_ON2:
4865 		case ID_LED_ON1_ON2:
4866 		case ID_LED_OFF1_ON2:
4867 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4868 			mac->ledctl_mode2 |= (ledctl_on << shift);
4869 			break;
4870 		case ID_LED_DEF1_OFF2:
4871 		case ID_LED_ON1_OFF2:
4872 		case ID_LED_OFF1_OFF2:
4873 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4874 			mac->ledctl_mode2 |= (ledctl_off << shift);
4875 			break;
4876 		default:
4877 			/* Do nothing */
4878 			break;
4879 		}
4880 	}
4881 
4882 	return E1000_SUCCESS;
4883 }
4884 
4885 /**
4886  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4887  *  @hw: pointer to the HW structure
4888  *
4889  *  ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
4890  *  register, so the the bus width is hard coded.
4891  **/
4892 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4893 {
4894 	struct e1000_bus_info *bus = &hw->bus;
4895 	s32 ret_val;
4896 
4897 	DEBUGFUNC("e1000_get_bus_info_ich8lan");
4898 
4899 	ret_val = e1000_get_bus_info_pcie_generic(hw);
4900 
4901 	/* ICH devices are "PCI Express"-ish.  They have
4902 	 * a configuration space, but do not contain
4903 	 * PCI Express Capability registers, so bus width
4904 	 * must be hardcoded.
4905 	 */
4906 	if (bus->width == e1000_bus_width_unknown)
4907 		bus->width = e1000_bus_width_pcie_x1;
4908 
4909 	return ret_val;
4910 }
4911 
4912 /**
4913  *  e1000_reset_hw_ich8lan - Reset the hardware
4914  *  @hw: pointer to the HW structure
4915  *
4916  *  Does a full reset of the hardware which includes a reset of the PHY and
4917  *  MAC.
4918  **/
4919 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4920 {
4921 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4922 	u16 kum_cfg;
4923 	u32 ctrl, reg;
4924 	s32 ret_val;
4925 
4926 	DEBUGFUNC("e1000_reset_hw_ich8lan");
4927 
4928 	/* Prevent the PCI-E bus from sticking if there is no TLP connection
4929 	 * on the last TLP read/write transaction when MAC is reset.
4930 	 */
4931 	ret_val = e1000_disable_pcie_master_generic(hw);
4932 	if (ret_val)
4933 		DEBUGOUT("PCI-E Master disable polling has failed.\n");
4934 
4935 	DEBUGOUT("Masking off all interrupts\n");
4936 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4937 
4938 	/* Disable the Transmit and Receive units.  Then delay to allow
4939 	 * any pending transactions to complete before we hit the MAC
4940 	 * with the global reset.
4941 	 */
4942 	E1000_WRITE_REG(hw, E1000_RCTL, 0);
4943 	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
4944 	E1000_WRITE_FLUSH(hw);
4945 
4946 	msec_delay(10);
4947 
4948 	/* Workaround for ICH8 bit corruption issue in FIFO memory */
4949 	if (hw->mac.type == e1000_ich8lan) {
4950 		/* Set Tx and Rx buffer allocation to 8k apiece. */
4951 		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
4952 		/* Set Packet Buffer Size to 16k. */
4953 		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
4954 	}
4955 
4956 	if (hw->mac.type == e1000_pchlan) {
4957 		/* Save the NVM K1 bit setting*/
4958 		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4959 		if (ret_val)
4960 			return ret_val;
4961 
4962 		if (kum_cfg & E1000_NVM_K1_ENABLE)
4963 			dev_spec->nvm_k1_enabled = TRUE;
4964 		else
4965 			dev_spec->nvm_k1_enabled = FALSE;
4966 	}
4967 
4968 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
4969 
4970 	if (!hw->phy.ops.check_reset_block(hw)) {
4971 		/* Full-chip reset requires MAC and PHY reset at the same
4972 		 * time to make sure the interface between MAC and the
4973 		 * external PHY is reset.
4974 		 */
4975 		ctrl |= E1000_CTRL_PHY_RST;
4976 
4977 		/* Gate automatic PHY configuration by hardware on
4978 		 * non-managed 82579
4979 		 */
4980 		if ((hw->mac.type == e1000_pch2lan) &&
4981 		    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
4982 			e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
4983 	}
4984 	ret_val = e1000_acquire_swflag_ich8lan(hw);
4985 	DEBUGOUT("Issuing a global reset to ich8lan\n");
4986 	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
4987 	/* cannot issue a flush here because it hangs the hardware */
4988 	msec_delay(20);
4989 
4990 	/* Set Phy Config Counter to 50msec */
4991 	if (hw->mac.type == e1000_pch2lan) {
4992 		reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
4993 		reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
4994 		reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
4995 		E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
4996 	}
4997 
4998 	if (ctrl & E1000_CTRL_PHY_RST) {
4999 		ret_val = hw->phy.ops.get_cfg_done(hw);
5000 		if (ret_val)
5001 			return ret_val;
5002 
5003 		ret_val = e1000_post_phy_reset_ich8lan(hw);
5004 		if (ret_val)
5005 			return ret_val;
5006 	}
5007 
5008 	/* For PCH, this write will make sure that any noise
5009 	 * will be detected as a CRC error and be dropped rather than show up
5010 	 * as a bad packet to the DMA engine.
5011 	 */
5012 	if (hw->mac.type == e1000_pchlan)
5013 		E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
5014 
5015 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
5016 	E1000_READ_REG(hw, E1000_ICR);
5017 
5018 	reg = E1000_READ_REG(hw, E1000_KABGTXD);
5019 	reg |= E1000_KABGTXD_BGSQLBIAS;
5020 	E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
5021 
5022 	return E1000_SUCCESS;
5023 }
5024 
5025 /**
5026  *  e1000_init_hw_ich8lan - Initialize the hardware
5027  *  @hw: pointer to the HW structure
5028  *
5029  *  Prepares the hardware for transmit and receive by doing the following:
5030  *   - initialize hardware bits
5031  *   - initialize LED identification
5032  *   - setup receive address registers
5033  *   - setup flow control
5034  *   - setup transmit descriptors
5035  *   - clear statistics
5036  **/
5037 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
5038 {
5039 	struct e1000_mac_info *mac = &hw->mac;
5040 	u32 ctrl_ext, txdctl, snoop;
5041 	s32 ret_val;
5042 	u16 i;
5043 
5044 	DEBUGFUNC("e1000_init_hw_ich8lan");
5045 
5046 	e1000_initialize_hw_bits_ich8lan(hw);
5047 
5048 	/* Initialize identification LED */
5049 	ret_val = mac->ops.id_led_init(hw);
5050 	/* An error is not fatal and we should not stop init due to this */
5051 	if (ret_val)
5052 		DEBUGOUT("Error initializing identification LED\n");
5053 
5054 	/* Setup the receive address. */
5055 	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
5056 
5057 	/* Zero out the Multicast HASH table */
5058 	DEBUGOUT("Zeroing the MTA\n");
5059 	for (i = 0; i < mac->mta_reg_count; i++)
5060 		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
5061 
5062 	/* The 82578 Rx buffer will stall if wakeup is enabled in host and
5063 	 * the ME.  Disable wakeup by clearing the host wakeup bit.
5064 	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
5065 	 */
5066 	if (hw->phy.type == e1000_phy_82578) {
5067 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
5068 		i &= ~BM_WUC_HOST_WU_BIT;
5069 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
5070 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
5071 		if (ret_val)
5072 			return ret_val;
5073 	}
5074 
5075 	/* Setup link and flow control */
5076 	ret_val = mac->ops.setup_link(hw);
5077 
5078 	/* Set the transmit descriptor write-back policy for both queues */
5079 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
5080 	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5081 		  E1000_TXDCTL_FULL_TX_DESC_WB);
5082 	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5083 		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5084 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
5085 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
5086 	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5087 		  E1000_TXDCTL_FULL_TX_DESC_WB);
5088 	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5089 		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5090 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
5091 
5092 	/* ICH8 has opposite polarity of no_snoop bits.
5093 	 * By default, we should use snoop behavior.
5094 	 */
5095 	if (mac->type == e1000_ich8lan)
5096 		snoop = PCIE_ICH8_SNOOP_ALL;
5097 	else
5098 		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
5099 	e1000_set_pcie_no_snoop_generic(hw, snoop);
5100 
5101 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5102 	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
5103 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5104 
5105 	/* Clear all of the statistics registers (clear on read).  It is
5106 	 * important that we do this after we have tried to establish link
5107 	 * because the symbol error count will increment wildly if there
5108 	 * is no link.
5109 	 */
5110 	e1000_clear_hw_cntrs_ich8lan(hw);
5111 
5112 	return ret_val;
5113 }
5114 
5115 /**
5116  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
5117  *  @hw: pointer to the HW structure
5118  *
5119  *  Sets/Clears required hardware bits necessary for correctly setting up the
5120  *  hardware for transmit and receive.
5121  **/
5122 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
5123 {
5124 	u32 reg;
5125 
5126 	DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
5127 
5128 	/* Extended Device Control */
5129 	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
5130 	reg |= (1 << 22);
5131 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
5132 	if (hw->mac.type >= e1000_pchlan)
5133 		reg |= E1000_CTRL_EXT_PHYPDEN;
5134 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
5135 
5136 	/* Transmit Descriptor Control 0 */
5137 	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
5138 	reg |= (1 << 22);
5139 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
5140 
5141 	/* Transmit Descriptor Control 1 */
5142 	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
5143 	reg |= (1 << 22);
5144 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
5145 
5146 	/* Transmit Arbitration Control 0 */
5147 	reg = E1000_READ_REG(hw, E1000_TARC(0));
5148 	if (hw->mac.type == e1000_ich8lan)
5149 		reg |= (1 << 28) | (1 << 29);
5150 	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
5151 	E1000_WRITE_REG(hw, E1000_TARC(0), reg);
5152 
5153 	/* Transmit Arbitration Control 1 */
5154 	reg = E1000_READ_REG(hw, E1000_TARC(1));
5155 	if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
5156 		reg &= ~(1 << 28);
5157 	else
5158 		reg |= (1 << 28);
5159 	reg |= (1 << 24) | (1 << 26) | (1 << 30);
5160 	E1000_WRITE_REG(hw, E1000_TARC(1), reg);
5161 
5162 	/* Device Status */
5163 	if (hw->mac.type == e1000_ich8lan) {
5164 		reg = E1000_READ_REG(hw, E1000_STATUS);
5165 		reg &= ~(1 << 31);
5166 		E1000_WRITE_REG(hw, E1000_STATUS, reg);
5167 	}
5168 
5169 	/* work-around descriptor data corruption issue during nfs v2 udp
5170 	 * traffic, just disable the nfs filtering capability
5171 	 */
5172 	reg = E1000_READ_REG(hw, E1000_RFCTL);
5173 	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
5174 
5175 	/* Disable IPv6 extension header parsing because some malformed
5176 	 * IPv6 headers can hang the Rx.
5177 	 */
5178 	if (hw->mac.type == e1000_ich8lan)
5179 		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
5180 	E1000_WRITE_REG(hw, E1000_RFCTL, reg);
5181 
5182 	/* Enable ECC on Lynxpoint */
5183 	if ((hw->mac.type == e1000_pch_lpt) ||
5184 	    (hw->mac.type == e1000_pch_spt)) {
5185 		reg = E1000_READ_REG(hw, E1000_PBECCSTS);
5186 		reg |= E1000_PBECCSTS_ECC_ENABLE;
5187 		E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
5188 
5189 		reg = E1000_READ_REG(hw, E1000_CTRL);
5190 		reg |= E1000_CTRL_MEHE;
5191 		E1000_WRITE_REG(hw, E1000_CTRL, reg);
5192 	}
5193 
5194 	return;
5195 }
5196 
5197 /**
5198  *  e1000_setup_link_ich8lan - Setup flow control and link settings
5199  *  @hw: pointer to the HW structure
5200  *
5201  *  Determines which flow control settings to use, then configures flow
5202  *  control.  Calls the appropriate media-specific link configuration
5203  *  function.  Assuming the adapter has a valid link partner, a valid link
5204  *  should be established.  Assumes the hardware has previously been reset
5205  *  and the transmitter and receiver are not enabled.
5206  **/
5207 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
5208 {
5209 	s32 ret_val;
5210 
5211 	DEBUGFUNC("e1000_setup_link_ich8lan");
5212 
5213 	if (hw->phy.ops.check_reset_block(hw))
5214 		return E1000_SUCCESS;
5215 
5216 	/* ICH parts do not have a word in the NVM to determine
5217 	 * the default flow control setting, so we explicitly
5218 	 * set it to full.
5219 	 */
5220 	if (hw->fc.requested_mode == e1000_fc_default)
5221 		hw->fc.requested_mode = e1000_fc_full;
5222 
5223 	/* Save off the requested flow control mode for use later.  Depending
5224 	 * on the link partner's capabilities, we may or may not use this mode.
5225 	 */
5226 	hw->fc.current_mode = hw->fc.requested_mode;
5227 
5228 	DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
5229 		hw->fc.current_mode);
5230 
5231 	/* Continue to configure the copper link. */
5232 	ret_val = hw->mac.ops.setup_physical_interface(hw);
5233 	if (ret_val)
5234 		return ret_val;
5235 
5236 	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
5237 	if ((hw->phy.type == e1000_phy_82578) ||
5238 	    (hw->phy.type == e1000_phy_82579) ||
5239 	    (hw->phy.type == e1000_phy_i217) ||
5240 	    (hw->phy.type == e1000_phy_82577)) {
5241 		E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
5242 
5243 		ret_val = hw->phy.ops.write_reg(hw,
5244 					     PHY_REG(BM_PORT_CTRL_PAGE, 27),
5245 					     hw->fc.pause_time);
5246 		if (ret_val)
5247 			return ret_val;
5248 	}
5249 
5250 	return e1000_set_fc_watermarks_generic(hw);
5251 }
5252 
5253 /**
5254  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
5255  *  @hw: pointer to the HW structure
5256  *
5257  *  Configures the kumeran interface to the PHY to wait the appropriate time
5258  *  when polling the PHY, then call the generic setup_copper_link to finish
5259  *  configuring the copper link.
5260  **/
5261 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
5262 {
5263 	u32 ctrl;
5264 	s32 ret_val;
5265 	u16 reg_data;
5266 
5267 	DEBUGFUNC("e1000_setup_copper_link_ich8lan");
5268 
5269 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5270 	ctrl |= E1000_CTRL_SLU;
5271 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5272 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5273 
5274 	/* Set the mac to wait the maximum time between each iteration
5275 	 * and increase the max iterations when polling the phy;
5276 	 * this fixes erroneous timeouts at 10Mbps.
5277 	 */
5278 	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
5279 					       0xFFFF);
5280 	if (ret_val)
5281 		return ret_val;
5282 	ret_val = e1000_read_kmrn_reg_generic(hw,
5283 					      E1000_KMRNCTRLSTA_INBAND_PARAM,
5284 					      &reg_data);
5285 	if (ret_val)
5286 		return ret_val;
5287 	reg_data |= 0x3F;
5288 	ret_val = e1000_write_kmrn_reg_generic(hw,
5289 					       E1000_KMRNCTRLSTA_INBAND_PARAM,
5290 					       reg_data);
5291 	if (ret_val)
5292 		return ret_val;
5293 
5294 	switch (hw->phy.type) {
5295 	case e1000_phy_igp_3:
5296 		ret_val = e1000_copper_link_setup_igp(hw);
5297 		if (ret_val)
5298 			return ret_val;
5299 		break;
5300 	case e1000_phy_bm:
5301 	case e1000_phy_82578:
5302 		ret_val = e1000_copper_link_setup_m88(hw);
5303 		if (ret_val)
5304 			return ret_val;
5305 		break;
5306 	case e1000_phy_82577:
5307 	case e1000_phy_82579:
5308 		ret_val = e1000_copper_link_setup_82577(hw);
5309 		if (ret_val)
5310 			return ret_val;
5311 		break;
5312 	case e1000_phy_ife:
5313 		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
5314 					       &reg_data);
5315 		if (ret_val)
5316 			return ret_val;
5317 
5318 		reg_data &= ~IFE_PMC_AUTO_MDIX;
5319 
5320 		switch (hw->phy.mdix) {
5321 		case 1:
5322 			reg_data &= ~IFE_PMC_FORCE_MDIX;
5323 			break;
5324 		case 2:
5325 			reg_data |= IFE_PMC_FORCE_MDIX;
5326 			break;
5327 		case 0:
5328 		default:
5329 			reg_data |= IFE_PMC_AUTO_MDIX;
5330 			break;
5331 		}
5332 		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
5333 						reg_data);
5334 		if (ret_val)
5335 			return ret_val;
5336 		break;
5337 	default:
5338 		break;
5339 	}
5340 
5341 	return e1000_setup_copper_link_generic(hw);
5342 }
5343 
5344 /**
5345  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
5346  *  @hw: pointer to the HW structure
5347  *
5348  *  Calls the PHY specific link setup function and then calls the
5349  *  generic setup_copper_link to finish configuring the link for
5350  *  Lynxpoint PCH devices
5351  **/
5352 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
5353 {
5354 	u32 ctrl;
5355 	s32 ret_val;
5356 
5357 	DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
5358 
5359 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5360 	ctrl |= E1000_CTRL_SLU;
5361 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5362 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5363 
5364 	ret_val = e1000_copper_link_setup_82577(hw);
5365 	if (ret_val)
5366 		return ret_val;
5367 
5368 	return e1000_setup_copper_link_generic(hw);
5369 }
5370 
5371 /**
5372  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
5373  *  @hw: pointer to the HW structure
5374  *  @speed: pointer to store current link speed
5375  *  @duplex: pointer to store the current link duplex
5376  *
5377  *  Calls the generic get_speed_and_duplex to retrieve the current link
5378  *  information and then calls the Kumeran lock loss workaround for links at
5379  *  gigabit speeds.
5380  **/
5381 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
5382 					  u16 *duplex)
5383 {
5384 	s32 ret_val;
5385 
5386 	DEBUGFUNC("e1000_get_link_up_info_ich8lan");
5387 
5388 	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
5389 	if (ret_val)
5390 		return ret_val;
5391 
5392 	if ((hw->mac.type == e1000_ich8lan) &&
5393 	    (hw->phy.type == e1000_phy_igp_3) &&
5394 	    (*speed == SPEED_1000)) {
5395 		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
5396 	}
5397 
5398 	return ret_val;
5399 }
5400 
5401 /**
5402  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
5403  *  @hw: pointer to the HW structure
5404  *
5405  *  Work-around for 82566 Kumeran PCS lock loss:
5406  *  On link status change (i.e. PCI reset, speed change) and link is up and
5407  *  speed is gigabit-
5408  *    0) if workaround is optionally disabled do nothing
5409  *    1) wait 1ms for Kumeran link to come up
5410  *    2) check Kumeran Diagnostic register PCS lock loss bit
5411  *    3) if not set the link is locked (all is good), otherwise...
5412  *    4) reset the PHY
5413  *    5) repeat up to 10 times
5414  *  Note: this is only called for IGP3 copper when speed is 1gb.
5415  **/
5416 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
5417 {
5418 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5419 	u32 phy_ctrl;
5420 	s32 ret_val;
5421 	u16 i, data;
5422 	bool link;
5423 
5424 	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
5425 
5426 	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
5427 		return E1000_SUCCESS;
5428 
5429 	/* Make sure link is up before proceeding.  If not just return.
5430 	 * Attempting this while link is negotiating fouled up link
5431 	 * stability
5432 	 */
5433 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
5434 	if (!link)
5435 		return E1000_SUCCESS;
5436 
5437 	for (i = 0; i < 10; i++) {
5438 		/* read once to clear */
5439 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5440 		if (ret_val)
5441 			return ret_val;
5442 		/* and again to get new status */
5443 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5444 		if (ret_val)
5445 			return ret_val;
5446 
5447 		/* check for PCS lock */
5448 		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
5449 			return E1000_SUCCESS;
5450 
5451 		/* Issue PHY reset */
5452 		hw->phy.ops.reset(hw);
5453 		msec_delay_irq(5);
5454 	}
5455 	/* Disable GigE link negotiation */
5456 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5457 	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
5458 		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5459 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5460 
5461 	/* Call gig speed drop workaround on Gig disable before accessing
5462 	 * any PHY registers
5463 	 */
5464 	e1000_gig_downshift_workaround_ich8lan(hw);
5465 
5466 	/* unable to acquire PCS lock */
5467 	return -E1000_ERR_PHY;
5468 }
5469 
5470 /**
5471  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
5472  *  @hw: pointer to the HW structure
5473  *  @state: boolean value used to set the current Kumeran workaround state
5474  *
5475  *  If ICH8, set the current Kumeran workaround state (enabled - TRUE
5476  *  /disabled - FALSE).
5477  **/
5478 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
5479 						 bool state)
5480 {
5481 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5482 
5483 	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
5484 
5485 	if (hw->mac.type != e1000_ich8lan) {
5486 		DEBUGOUT("Workaround applies to ICH8 only.\n");
5487 		return;
5488 	}
5489 
5490 	dev_spec->kmrn_lock_loss_workaround_enabled = state;
5491 
5492 	return;
5493 }
5494 
5495 /**
5496  *  e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
5497  *  @hw: pointer to the HW structure
5498  *
5499  *  Workaround for 82566 power-down on D3 entry:
5500  *    1) disable gigabit link
5501  *    2) write VR power-down enable
5502  *    3) read it back
5503  *  Continue if successful, else issue LCD reset and repeat
5504  **/
5505 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
5506 {
5507 	u32 reg;
5508 	u16 data;
5509 	u8  retry = 0;
5510 
5511 	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
5512 
5513 	if (hw->phy.type != e1000_phy_igp_3)
5514 		return;
5515 
5516 	/* Try the workaround twice (if needed) */
5517 	do {
5518 		/* Disable link */
5519 		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
5520 		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
5521 			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5522 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
5523 
5524 		/* Call gig speed drop workaround on Gig disable before
5525 		 * accessing any PHY registers
5526 		 */
5527 		if (hw->mac.type == e1000_ich8lan)
5528 			e1000_gig_downshift_workaround_ich8lan(hw);
5529 
5530 		/* Write VR power-down enable */
5531 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5532 		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5533 		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
5534 				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);
5535 
5536 		/* Read it back and test */
5537 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5538 		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5539 		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
5540 			break;
5541 
5542 		/* Issue PHY reset and repeat at most one more time */
5543 		reg = E1000_READ_REG(hw, E1000_CTRL);
5544 		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
5545 		retry++;
5546 	} while (retry);
5547 }
5548 
5549 /**
5550  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
5551  *  @hw: pointer to the HW structure
5552  *
5553  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
5554  *  LPLU, Gig disable, MDIC PHY reset):
5555  *    1) Set Kumeran Near-end loopback
5556  *    2) Clear Kumeran Near-end loopback
5557  *  Should only be called for ICH8[m] devices with any 1G Phy.
5558  **/
5559 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
5560 {
5561 	s32 ret_val;
5562 	u16 reg_data;
5563 
5564 	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
5565 
5566 	if ((hw->mac.type != e1000_ich8lan) ||
5567 	    (hw->phy.type == e1000_phy_ife))
5568 		return;
5569 
5570 	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5571 					      &reg_data);
5572 	if (ret_val)
5573 		return;
5574 	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
5575 	ret_val = e1000_write_kmrn_reg_generic(hw,
5576 					       E1000_KMRNCTRLSTA_DIAG_OFFSET,
5577 					       reg_data);
5578 	if (ret_val)
5579 		return;
5580 	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
5581 	e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5582 				     reg_data);
5583 }
5584 
5585 /**
5586  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
5587  *  @hw: pointer to the HW structure
5588  *
5589  *  During S0 to Sx transition, it is possible the link remains at gig
5590  *  instead of negotiating to a lower speed.  Before going to Sx, set
5591  *  'Gig Disable' to force link speed negotiation to a lower speed based on
5592  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
5593  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
5594  *  needs to be written.
5595  *  Parts that support (and are linked to a partner which support) EEE in
5596  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
5597  *  than 10Mbps w/o EEE.
5598  **/
5599 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
5600 {
5601 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5602 	u32 phy_ctrl;
5603 	s32 ret_val;
5604 
5605 	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
5606 
5607 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5608 	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
5609 
5610 	if (hw->phy.type == e1000_phy_i217) {
5611 		u16 phy_reg, device_id = hw->device_id;
5612 
5613 		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
5614 		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
5615 		    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
5616 		    (device_id == E1000_DEV_ID_PCH_I218_V3) ||
5617 		    (hw->mac.type == e1000_pch_spt)) {
5618 			u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
5619 
5620 			E1000_WRITE_REG(hw, E1000_FEXTNVM6,
5621 					fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
5622 		}
5623 
5624 		ret_val = hw->phy.ops.acquire(hw);
5625 		if (ret_val)
5626 			goto out;
5627 
5628 		if (!dev_spec->eee_disable) {
5629 			u16 eee_advert;
5630 
5631 			ret_val =
5632 			    e1000_read_emi_reg_locked(hw,
5633 						      I217_EEE_ADVERTISEMENT,
5634 						      &eee_advert);
5635 			if (ret_val)
5636 				goto release;
5637 
5638 			/* Disable LPLU if both link partners support 100BaseT
5639 			 * EEE and 100Full is advertised on both ends of the
5640 			 * link, and enable Auto Enable LPI since there will
5641 			 * be no driver to enable LPI while in Sx.
5642 			 */
5643 			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
5644 			    (dev_spec->eee_lp_ability &
5645 			     I82579_EEE_100_SUPPORTED) &&
5646 			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
5647 				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
5648 					      E1000_PHY_CTRL_NOND0A_LPLU);
5649 
5650 				/* Set Auto Enable LPI after link up */
5651 				hw->phy.ops.read_reg_locked(hw,
5652 							    I217_LPI_GPIO_CTRL,
5653 							    &phy_reg);
5654 				phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5655 				hw->phy.ops.write_reg_locked(hw,
5656 							     I217_LPI_GPIO_CTRL,
5657 							     phy_reg);
5658 			}
5659 		}
5660 
5661 		/* For i217 Intel Rapid Start Technology support,
5662 		 * when the system is going into Sx and no manageability engine
5663 		 * is present, the driver must configure proxy to reset only on
5664 		 * power good.  LPI (Low Power Idle) state must also reset only
5665 		 * on power good, as well as the MTA (Multicast table array).
5666 		 * The SMBus release must also be disabled on LCD reset.
5667 		 */
5668 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
5669 		      E1000_ICH_FWSM_FW_VALID)) {
5670 			/* Enable proxy to reset only on power good. */
5671 			hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
5672 						    &phy_reg);
5673 			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
5674 			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
5675 						     phy_reg);
5676 
5677 			/* Set bit enable LPI (EEE) to reset only on
5678 			 * power good.
5679 			*/
5680 			hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
5681 			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
5682 			hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
5683 
5684 			/* Disable the SMB release on LCD reset. */
5685 			hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
5686 			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
5687 			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5688 		}
5689 
5690 		/* Enable MTA to reset for Intel Rapid Start Technology
5691 		 * Support
5692 		 */
5693 		hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
5694 		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
5695 		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5696 
5697 release:
5698 		hw->phy.ops.release(hw);
5699 	}
5700 out:
5701 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5702 
5703 	if (hw->mac.type == e1000_ich8lan)
5704 		e1000_gig_downshift_workaround_ich8lan(hw);
5705 
5706 	if (hw->mac.type >= e1000_pchlan) {
5707 		e1000_oem_bits_config_ich8lan(hw, FALSE);
5708 
5709 		/* Reset PHY to activate OEM bits on 82577/8 */
5710 		if (hw->mac.type == e1000_pchlan)
5711 			e1000_phy_hw_reset_generic(hw);
5712 
5713 		ret_val = hw->phy.ops.acquire(hw);
5714 		if (ret_val)
5715 			return;
5716 		e1000_write_smbus_addr(hw);
5717 		hw->phy.ops.release(hw);
5718 	}
5719 
5720 	return;
5721 }
5722 
5723 /**
5724  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
5725  *  @hw: pointer to the HW structure
5726  *
5727  *  During Sx to S0 transitions on non-managed devices or managed devices
5728  *  on which PHY resets are not blocked, if the PHY registers cannot be
5729  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
5730  *  the PHY.
5731  *  On i217, setup Intel Rapid Start Technology.
5732  **/
5733 u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
5734 {
5735 	s32 ret_val;
5736 
5737 	DEBUGFUNC("e1000_resume_workarounds_pchlan");
5738 	if (hw->mac.type < e1000_pch2lan)
5739 		return E1000_SUCCESS;
5740 
5741 	ret_val = e1000_init_phy_workarounds_pchlan(hw);
5742 	if (ret_val) {
5743 		DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
5744 		return ret_val;
5745 	}
5746 
5747 	/* For i217 Intel Rapid Start Technology support when the system
5748 	 * is transitioning from Sx and no manageability engine is present
5749 	 * configure SMBus to restore on reset, disable proxy, and enable
5750 	 * the reset on MTA (Multicast table array).
5751 	 */
5752 	if (hw->phy.type == e1000_phy_i217) {
5753 		u16 phy_reg;
5754 
5755 		ret_val = hw->phy.ops.acquire(hw);
5756 		if (ret_val) {
5757 			DEBUGOUT("Failed to setup iRST\n");
5758 			return ret_val;
5759 		}
5760 
5761 		/* Clear Auto Enable LPI after link up */
5762 		hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
5763 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5764 		hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
5765 
5766 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
5767 		    E1000_ICH_FWSM_FW_VALID)) {
5768 			/* Restore clear on SMB if no manageability engine
5769 			 * is present
5770 			 */
5771 			ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
5772 							      &phy_reg);
5773 			if (ret_val)
5774 				goto release;
5775 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5776 			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5777 
5778 			/* Disable Proxy */
5779 			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
5780 		}
5781 		/* Enable reset on MTA */
5782 		ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
5783 						      &phy_reg);
5784 		if (ret_val)
5785 			goto release;
5786 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5787 		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5788 release:
5789 		if (ret_val)
5790 			DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
5791 		hw->phy.ops.release(hw);
5792 		return ret_val;
5793 	}
5794 	return E1000_SUCCESS;
5795 }
5796 
5797 /**
5798  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5799  *  @hw: pointer to the HW structure
5800  *
5801  *  Return the LED back to the default configuration.
5802  **/
5803 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5804 {
5805 	DEBUGFUNC("e1000_cleanup_led_ich8lan");
5806 
5807 	if (hw->phy.type == e1000_phy_ife)
5808 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5809 					     0);
5810 
5811 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5812 	return E1000_SUCCESS;
5813 }
5814 
5815 /**
5816  *  e1000_led_on_ich8lan - Turn LEDs on
5817  *  @hw: pointer to the HW structure
5818  *
5819  *  Turn on the LEDs.
5820  **/
5821 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5822 {
5823 	DEBUGFUNC("e1000_led_on_ich8lan");
5824 
5825 	if (hw->phy.type == e1000_phy_ife)
5826 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5827 				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5828 
5829 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5830 	return E1000_SUCCESS;
5831 }
5832 
5833 /**
5834  *  e1000_led_off_ich8lan - Turn LEDs off
5835  *  @hw: pointer to the HW structure
5836  *
5837  *  Turn off the LEDs.
5838  **/
5839 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5840 {
5841 	DEBUGFUNC("e1000_led_off_ich8lan");
5842 
5843 	if (hw->phy.type == e1000_phy_ife)
5844 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5845 			       (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5846 
5847 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5848 	return E1000_SUCCESS;
5849 }
5850 
5851 /**
5852  *  e1000_setup_led_pchlan - Configures SW controllable LED
5853  *  @hw: pointer to the HW structure
5854  *
5855  *  This prepares the SW controllable LED for use.
5856  **/
5857 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5858 {
5859 	DEBUGFUNC("e1000_setup_led_pchlan");
5860 
5861 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5862 				     (u16)hw->mac.ledctl_mode1);
5863 }
5864 
5865 /**
5866  *  e1000_cleanup_led_pchlan - Restore the default LED operation
5867  *  @hw: pointer to the HW structure
5868  *
5869  *  Return the LED back to the default configuration.
5870  **/
5871 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5872 {
5873 	DEBUGFUNC("e1000_cleanup_led_pchlan");
5874 
5875 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5876 				     (u16)hw->mac.ledctl_default);
5877 }
5878 
5879 /**
5880  *  e1000_led_on_pchlan - Turn LEDs on
5881  *  @hw: pointer to the HW structure
5882  *
5883  *  Turn on the LEDs.
5884  **/
5885 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5886 {
5887 	u16 data = (u16)hw->mac.ledctl_mode2;
5888 	u32 i, led;
5889 
5890 	DEBUGFUNC("e1000_led_on_pchlan");
5891 
5892 	/* If no link, then turn LED on by setting the invert bit
5893 	 * for each LED that's mode is "link_up" in ledctl_mode2.
5894 	 */
5895 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5896 		for (i = 0; i < 3; i++) {
5897 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5898 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
5899 			    E1000_LEDCTL_MODE_LINK_UP)
5900 				continue;
5901 			if (led & E1000_PHY_LED0_IVRT)
5902 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5903 			else
5904 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
5905 		}
5906 	}
5907 
5908 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5909 }
5910 
5911 /**
5912  *  e1000_led_off_pchlan - Turn LEDs off
5913  *  @hw: pointer to the HW structure
5914  *
5915  *  Turn off the LEDs.
5916  **/
5917 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5918 {
5919 	u16 data = (u16)hw->mac.ledctl_mode1;
5920 	u32 i, led;
5921 
5922 	DEBUGFUNC("e1000_led_off_pchlan");
5923 
5924 	/* If no link, then turn LED off by clearing the invert bit
5925 	 * for each LED that's mode is "link_up" in ledctl_mode1.
5926 	 */
5927 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5928 		for (i = 0; i < 3; i++) {
5929 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5930 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
5931 			    E1000_LEDCTL_MODE_LINK_UP)
5932 				continue;
5933 			if (led & E1000_PHY_LED0_IVRT)
5934 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5935 			else
5936 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
5937 		}
5938 	}
5939 
5940 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5941 }
5942 
5943 /**
5944  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5945  *  @hw: pointer to the HW structure
5946  *
5947  *  Read appropriate register for the config done bit for completion status
5948  *  and configure the PHY through s/w for EEPROM-less parts.
5949  *
5950  *  NOTE: some silicon which is EEPROM-less will fail trying to read the
5951  *  config done bit, so only an error is logged and continues.  If we were
5952  *  to return with error, EEPROM-less silicon would not be able to be reset
5953  *  or change link.
5954  **/
5955 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5956 {
5957 	s32 ret_val = E1000_SUCCESS;
5958 	u32 bank = 0;
5959 	u32 status;
5960 
5961 	DEBUGFUNC("e1000_get_cfg_done_ich8lan");
5962 
5963 	e1000_get_cfg_done_generic(hw);
5964 
5965 	/* Wait for indication from h/w that it has completed basic config */
5966 	if (hw->mac.type >= e1000_ich10lan) {
5967 		e1000_lan_init_done_ich8lan(hw);
5968 	} else {
5969 		ret_val = e1000_get_auto_rd_done_generic(hw);
5970 		if (ret_val) {
5971 			/* When auto config read does not complete, do not
5972 			 * return with an error. This can happen in situations
5973 			 * where there is no eeprom and prevents getting link.
5974 			 */
5975 			DEBUGOUT("Auto Read Done did not complete\n");
5976 			ret_val = E1000_SUCCESS;
5977 		}
5978 	}
5979 
5980 	/* Clear PHY Reset Asserted bit */
5981 	status = E1000_READ_REG(hw, E1000_STATUS);
5982 	if (status & E1000_STATUS_PHYRA)
5983 		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
5984 	else
5985 		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
5986 
5987 	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
5988 	if (hw->mac.type <= e1000_ich9lan) {
5989 		if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
5990 		    (hw->phy.type == e1000_phy_igp_3)) {
5991 			e1000_phy_init_script_igp3(hw);
5992 		}
5993 	} else {
5994 		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
5995 			/* Maybe we should do a basic PHY config */
5996 			DEBUGOUT("EEPROM not present\n");
5997 			ret_val = -E1000_ERR_CONFIG;
5998 		}
5999 	}
6000 
6001 	return ret_val;
6002 }
6003 
6004 /**
6005  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
6006  * @hw: pointer to the HW structure
6007  *
6008  * In the case of a PHY power down to save power, or to turn off link during a
6009  * driver unload, or wake on lan is not enabled, remove the link.
6010  **/
6011 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
6012 {
6013 	/* If the management interface is not enabled, then power down */
6014 	if (!(hw->mac.ops.check_mng_mode(hw) ||
6015 	      hw->phy.ops.check_reset_block(hw)))
6016 		e1000_power_down_phy_copper(hw);
6017 
6018 	return;
6019 }
6020 
6021 /**
6022  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
6023  *  @hw: pointer to the HW structure
6024  *
6025  *  Clears hardware counters specific to the silicon family and calls
6026  *  clear_hw_cntrs_generic to clear all general purpose counters.
6027  **/
6028 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
6029 {
6030 	u16 phy_data;
6031 	s32 ret_val;
6032 
6033 	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
6034 
6035 	e1000_clear_hw_cntrs_base_generic(hw);
6036 
6037 	E1000_READ_REG(hw, E1000_ALGNERRC);
6038 	E1000_READ_REG(hw, E1000_RXERRC);
6039 	E1000_READ_REG(hw, E1000_TNCRS);
6040 	E1000_READ_REG(hw, E1000_CEXTERR);
6041 	E1000_READ_REG(hw, E1000_TSCTC);
6042 	E1000_READ_REG(hw, E1000_TSCTFC);
6043 
6044 	E1000_READ_REG(hw, E1000_MGTPRC);
6045 	E1000_READ_REG(hw, E1000_MGTPDC);
6046 	E1000_READ_REG(hw, E1000_MGTPTC);
6047 
6048 	E1000_READ_REG(hw, E1000_IAC);
6049 	E1000_READ_REG(hw, E1000_ICRXOC);
6050 
6051 	/* Clear PHY statistics registers */
6052 	if ((hw->phy.type == e1000_phy_82578) ||
6053 	    (hw->phy.type == e1000_phy_82579) ||
6054 	    (hw->phy.type == e1000_phy_i217) ||
6055 	    (hw->phy.type == e1000_phy_82577)) {
6056 		ret_val = hw->phy.ops.acquire(hw);
6057 		if (ret_val)
6058 			return;
6059 		ret_val = hw->phy.ops.set_page(hw,
6060 					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
6061 		if (ret_val)
6062 			goto release;
6063 		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
6064 		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
6065 		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
6066 		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
6067 		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
6068 		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
6069 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
6070 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
6071 		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
6072 		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
6073 		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
6074 		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
6075 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
6076 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
6077 release:
6078 		hw->phy.ops.release(hw);
6079 	}
6080 }
6081 
6082