xref: /dragonfly/sys/dev/netif/ig_hal/e1000_ich8lan.c (revision 7d3e9a5b)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2019, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 /* 82562G 10/100 Network Connection
36  * 82562G-2 10/100 Network Connection
37  * 82562GT 10/100 Network Connection
38  * 82562GT-2 10/100 Network Connection
39  * 82562V 10/100 Network Connection
40  * 82562V-2 10/100 Network Connection
41  * 82566DC-2 Gigabit Network Connection
42  * 82566DC Gigabit Network Connection
43  * 82566DM-2 Gigabit Network Connection
44  * 82566DM Gigabit Network Connection
45  * 82566MC Gigabit Network Connection
46  * 82566MM Gigabit Network Connection
47  * 82567LM Gigabit Network Connection
48  * 82567LF Gigabit Network Connection
49  * 82567V Gigabit Network Connection
50  * 82567LM-2 Gigabit Network Connection
51  * 82567LF-2 Gigabit Network Connection
52  * 82567V-2 Gigabit Network Connection
53  * 82567LF-3 Gigabit Network Connection
54  * 82567LM-3 Gigabit Network Connection
55  * 82567LM-4 Gigabit Network Connection
56  * 82577LM Gigabit Network Connection
57  * 82577LC Gigabit Network Connection
58  * 82578DM Gigabit Network Connection
59  * 82578DC Gigabit Network Connection
60  * 82579LM Gigabit Network Connection
61  * 82579V Gigabit Network Connection
62  * Ethernet Connection I217-LM
63  * Ethernet Connection I217-V
64  * Ethernet Connection I218-V
65  * Ethernet Connection I218-LM
66  * Ethernet Connection (2) I218-LM
67  * Ethernet Connection (2) I218-V
68  * Ethernet Connection (3) I218-LM
69  * Ethernet Connection (3) I218-V
70  */
71 
72 #include "e1000_api.h"
73 
74 static s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
75 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
76 static s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
77 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
78 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
79 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
80 static int  e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
81 static int  e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
82 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
83 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
84 					      u8 *mc_addr_list,
85 					      u32 mc_addr_count);
86 static s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
87 static s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
88 static s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
89 static s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
90 					    bool active);
91 static s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
92 					    bool active);
93 static s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
94 				   u16 words, u16 *data);
95 static s32  e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
96 			       u16 *data);
97 static s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
98 				    u16 words, u16 *data);
99 static s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
100 static s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
101 static s32  e1000_update_nvm_checksum_spt(struct e1000_hw *hw);
102 static s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
103 					    u16 *data);
104 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
105 static s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
106 static s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
107 static s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
108 static s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
109 static s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
110 static s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
111 static s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
112 					   u16 *speed, u16 *duplex);
113 static s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
114 static s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
115 static s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
116 static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
117 static s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
118 static s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
119 static s32  e1000_led_on_pchlan(struct e1000_hw *hw);
120 static s32  e1000_led_off_pchlan(struct e1000_hw *hw);
121 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
122 static s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
123 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
124 static s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
125 static s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
126 					  u32 offset, u8 *data);
127 static s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
128 					  u8 size, u16 *data);
129 static s32  e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
130 					    u32 *data);
131 static s32  e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
132 					   u32 offset, u32 *data);
133 static s32  e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
134 					     u32 offset, u32 data);
135 static s32  e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
136 						  u32 offset, u32 dword);
137 static s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
138 					  u32 offset, u16 *data);
139 static s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
140 						 u32 offset, u8 byte);
141 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
142 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
143 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
144 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
145 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
146 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
147 static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr);
148 
149 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
150 /* Offset 04h HSFSTS */
151 union ich8_hws_flash_status {
152 	struct ich8_hsfsts {
153 		u16 flcdone:1; /* bit 0 Flash Cycle Done */
154 		u16 flcerr:1; /* bit 1 Flash Cycle Error */
155 		u16 dael:1; /* bit 2 Direct Access Error Log */
156 		u16 berasesz:2; /* bit 4:3 Sector Erase Size */
157 		u16 flcinprog:1; /* bit 5 Flash Cycle in Progress */
158 		u16 reserved1:2; /* bit 7:6 Reserved */
159 		u16 reserved2:6; /* bit 13:8 Reserved */
160 		u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
161 		u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
162 	} hsf_status;
163 	u16 regval;
164 };
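/* Illustrative sketch (not part of the driver): these overlay unions let
 * the flash helpers read a register once and then test fields by name.
 * Assuming a 16-bit flash accessor such as E1000_READ_FLASH_REG16() and a
 * hypothetical cycle_complete flag, polling for cycle completion would
 * look like:
 *
 *	union ich8_hws_flash_status hsfsts;
 *
 *	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
 *	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
 *		cycle_complete = TRUE;
 *
 * The same pattern applies to the FLCTL and region-access unions below.
 */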
165 
166 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
167 /* Offset 06h FLCTL */
168 union ich8_hws_flash_ctrl {
169 	struct ich8_hsflctl {
170 		u16 flcgo:1;   /* 0 Flash Cycle Go */
171 		u16 flcycle:2;   /* 2:1 Flash Cycle */
172 		u16 reserved:5;   /* 7:3 Reserved  */
173 		u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
174 		u16 flockdn:6;   /* 15:10 Reserved */
175 	} hsf_ctrl;
176 	u16 regval;
177 };
178 
179 /* ICH Flash Region Access Permissions */
180 union ich8_hws_flash_regacc {
181 	struct ich8_flracc {
182 		u32 grra:8; /* 0:7 GbE region Read Access */
183 		u32 grwa:8; /* 8:15 GbE region Write Access */
184 		u32 gmrag:8; /* 16:23 GbE Master Read Access Grant */
185 		u32 gmwag:8; /* 24:31 GbE Master Write Access Grant */
186 	} hsf_flregacc;
187 	u32 regval; /* u32 so it covers the full 32-bit bitfield above */
188 };
189 
190 /**
191  *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
192  *  @hw: pointer to the HW structure
193  *
194  *  Test access to the PHY registers by reading the PHY ID registers.  If
195  *  the PHY ID is already known (e.g. resume path) compare it with known ID,
196  *  the PHY ID is already known (e.g. on the resume path), compare it with the
197  *  known ID; otherwise assume the read PHY ID is correct if it is valid.
198  *  Assumes the sw/fw/hw semaphore is already acquired.
199  **/
200 static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
201 {
202 	u16 phy_reg = 0;
203 	u32 phy_id = 0;
204 	s32 ret_val = 0;
205 	u16 retry_count;
206 	u32 mac_reg = 0;
207 
208 	for (retry_count = 0; retry_count < 2; retry_count++) {
209 		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
210 		if (ret_val || (phy_reg == 0xFFFF))
211 			continue;
212 		phy_id = (u32)(phy_reg << 16);
213 
214 		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
215 		if (ret_val || (phy_reg == 0xFFFF)) {
216 			phy_id = 0;
217 			continue;
218 		}
219 		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
220 		break;
221 	}
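	/* At this point phy_id holds PHY_ID1 in the upper 16 bits and the
	 * PHY_ID2 model bits in the lower 16.  Hypothetical example:
	 * PHY_ID1 = 0x0154 and PHY_ID2 = 0x0AB3 yield phy_id = 0x01540AB0,
	 * with revision 3 in the low bits cleared by PHY_REVISION_MASK.
	 */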
222 
223 	if (hw->phy.id) {
224 		if  (hw->phy.id == phy_id)
225 			goto out;
226 	} else if (phy_id) {
227 		hw->phy.id = phy_id;
228 		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
229 		goto out;
230 	}
231 
232 	/* In case the PHY needs to be in mdio slow mode,
233 	 * set slow mode and try to get the PHY id again.
234 	 */
235 	if (hw->mac.type < e1000_pch_lpt) {
236 		hw->phy.ops.release(hw);
237 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
238 		if (!ret_val)
239 			ret_val = e1000_get_phy_id(hw);
240 		hw->phy.ops.acquire(hw);
241 	}
242 
243 	if (ret_val)
244 		return FALSE;
245 out:
246 	if (hw->mac.type >= e1000_pch_lpt) {
247 		/* Only unforce SMBus if ME is not active */
248 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
249 		    E1000_ICH_FWSM_FW_VALID)) {
250 			/* Unforce SMBus mode in PHY */
251 			hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
252 			phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
253 			hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
254 
255 			/* Unforce SMBus mode in MAC */
256 			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
257 			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
258 			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
259 		}
260 	}
261 
262 	return TRUE;
263 }
264 
265 /**
266  *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
267  *  @hw: pointer to the HW structure
268  *
269  *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
270  *  used to reset the PHY to a quiescent state when necessary.
271  **/
272 static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
273 {
274 	u32 mac_reg;
275 
276 	DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");
277 
278 	/* Set Phy Config Counter to 50msec */
279 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
280 	mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
281 	mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
282 	E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
283 
284 	/* Toggle LANPHYPC Value bit */
285 	mac_reg = E1000_READ_REG(hw, E1000_CTRL);
286 	mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
287 	mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
288 	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
289 	E1000_WRITE_FLUSH(hw);
290 	msec_delay(1);
291 	mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
292 	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
293 	E1000_WRITE_FLUSH(hw);
294 
295 	if (hw->mac.type < e1000_pch_lpt) {
296 		msec_delay(50);
297 	} else {
298 		u16 count = 20;
299 
300 		do {
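		/* Poll up to 20 * 5 = 100 ms for the LANPHYPC cycle-done
		 * (LPCD) indication, then allow the final 30 ms settle below.
		 */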
301 			msec_delay(5);
302 		} while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
303 			   E1000_CTRL_EXT_LPCD) && count--);
304 
305 		msec_delay(30);
306 	}
307 }
308 
309 /**
310  *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
311  *  @hw: pointer to the HW structure
312  *
313  *  Workarounds/flow necessary for PHY initialization during driver load
314  *  and resume paths.
315  **/
316 static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
317 {
318 	u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
319 	s32 ret_val;
320 
321 	DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
322 
323 	/* Gate automatic PHY configuration by hardware on managed and
324 	 * non-managed 82579 and newer adapters.
325 	 */
326 	e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
327 
328 	/* It is not possible to be certain of the current state of ULP
329 	 * so forcibly disable it.
330 	 */
331 	hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
332 	e1000_disable_ulp_lpt_lp(hw, TRUE);
333 
334 	ret_val = hw->phy.ops.acquire(hw);
335 	if (ret_val) {
336 		DEBUGOUT("Failed to initialize PHY flow\n");
337 		goto out;
338 	}
339 
340 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
341 	 * inaccessible and resetting the PHY is not blocked, toggle the
342 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
343 	 */
344 	switch (hw->mac.type) {
345 	case e1000_pch_lpt:
346 	case e1000_pch_spt:
347 	case e1000_pch_cnp:
348 		if (e1000_phy_is_accessible_pchlan(hw))
349 			break;
350 
351 		/* Before toggling LANPHYPC, see if PHY is accessible by
352 		 * forcing MAC to SMBus mode first.
353 		 */
354 		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
355 		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
356 		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
357 
358 		/* Wait 50 milliseconds for MAC to finish any retries
359 		 * that it might be trying to perform from previous
360 		 * attempts to acknowledge any phy read requests.
361 		 */
362 		msec_delay(50);
363 
364 		/* fall-through */
365 	case e1000_pch2lan:
366 		if (e1000_phy_is_accessible_pchlan(hw))
367 			break;
368 
369 		/* fall-through */
370 	case e1000_pchlan:
371 		if ((hw->mac.type == e1000_pchlan) &&
372 		    (fwsm & E1000_ICH_FWSM_FW_VALID))
373 			break;
374 
375 		if (hw->phy.ops.check_reset_block(hw)) {
376 			DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
377 			ret_val = -E1000_ERR_PHY;
378 			break;
379 		}
380 
381 		/* Toggle LANPHYPC Value bit */
382 		e1000_toggle_lanphypc_pch_lpt(hw);
383 		if (hw->mac.type >= e1000_pch_lpt) {
384 			if (e1000_phy_is_accessible_pchlan(hw))
385 				break;
386 
387 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
388 			 * so ensure that the MAC is also out of SMBus mode
389 			 */
390 			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
391 			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
392 			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
393 
394 			if (e1000_phy_is_accessible_pchlan(hw))
395 				break;
396 
397 			ret_val = -E1000_ERR_PHY;
398 		}
399 		break;
400 	default:
401 		break;
402 	}
403 
404 	hw->phy.ops.release(hw);
405 	if (!ret_val) {
406 
407 		/* Check to see if able to reset PHY.  Print error if not */
408 		if (hw->phy.ops.check_reset_block(hw)) {
409 			ERROR_REPORT("Reset blocked by ME\n");
410 			goto out;
411 		}
412 
413 		/* Reset the PHY before any access to it.  Doing so, ensures
414 		 * that the PHY is in a known good state before we read/write
415 		 * PHY registers.  The generic reset is sufficient here,
416 		 * because we haven't determined the PHY type yet.
417 		 */
418 		ret_val = e1000_phy_hw_reset_generic(hw);
419 		if (ret_val)
420 			goto out;
421 
422 		/* On a successful reset, possibly need to wait for the PHY
423 		 * to quiesce to an accessible state before returning control
424 		 * to the calling function.  If the PHY does not quiesce, then
425 		 * return E1000_BLK_PHY_RESET, as this is the condition that
426 		 * the PHY is in.
427 		 */
428 		ret_val = hw->phy.ops.check_reset_block(hw);
429 		if (ret_val)
430 			ERROR_REPORT("ME blocked access to PHY after reset\n");
431 	}
432 
433 out:
434 	/* Ungate automatic PHY configuration on non-managed 82579 */
435 	if ((hw->mac.type == e1000_pch2lan) &&
436 	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
437 		msec_delay(10);
438 		e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
439 	}
440 
441 	return ret_val;
442 }
443 
444 /**
445  *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
446  *  @hw: pointer to the HW structure
447  *
448  *  Initialize family-specific PHY parameters and function pointers.
449  **/
450 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
451 {
452 	struct e1000_phy_info *phy = &hw->phy;
453 	s32 ret_val;
454 
455 	DEBUGFUNC("e1000_init_phy_params_pchlan");
456 
457 	phy->addr		= 1;
458 	phy->reset_delay_us	= 100;
459 
460 	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
461 	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
462 	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
463 	phy->ops.set_page	= e1000_set_page_igp;
464 	phy->ops.read_reg	= e1000_read_phy_reg_hv;
465 	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
466 	phy->ops.read_reg_page	= e1000_read_phy_reg_page_hv;
467 	phy->ops.release	= e1000_release_swflag_ich8lan;
468 	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
469 	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
470 	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
471 	phy->ops.write_reg	= e1000_write_phy_reg_hv;
472 	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
473 	phy->ops.write_reg_page	= e1000_write_phy_reg_page_hv;
474 	phy->ops.power_up	= e1000_power_up_phy_copper;
475 	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;
476 	phy->autoneg_mask	= AUTONEG_ADVERTISE_SPEED_DEFAULT;
477 
478 	phy->id = e1000_phy_unknown;
479 
480 	ret_val = e1000_init_phy_workarounds_pchlan(hw);
481 	if (ret_val)
482 		return ret_val;
483 
484 	if (phy->id == e1000_phy_unknown)
485 		switch (hw->mac.type) {
486 		default:
487 			ret_val = e1000_get_phy_id(hw);
488 			if (ret_val)
489 				return ret_val;
490 			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
491 				break;
492 			/* fall-through */
493 		case e1000_pch2lan:
494 		case e1000_pch_lpt:
495 		case e1000_pch_spt:
496 		case e1000_pch_cnp:
497 			/* In case the PHY needs to be in mdio slow mode,
498 			 * set slow mode and try to get the PHY id again.
499 			 */
500 			ret_val = e1000_set_mdio_slow_mode_hv(hw);
501 			if (ret_val)
502 				return ret_val;
503 			ret_val = e1000_get_phy_id(hw);
504 			if (ret_val)
505 				return ret_val;
506 			break;
507 		}
508 	phy->type = e1000_get_phy_type_from_id(phy->id);
509 
510 	switch (phy->type) {
511 	case e1000_phy_82577:
512 	case e1000_phy_82579:
513 	case e1000_phy_i217:
514 		phy->ops.check_polarity = e1000_check_polarity_82577;
515 		phy->ops.force_speed_duplex =
516 			e1000_phy_force_speed_duplex_82577;
517 		phy->ops.get_cable_length = e1000_get_cable_length_82577;
518 		phy->ops.get_info = e1000_get_phy_info_82577;
519 		phy->ops.commit = e1000_phy_sw_reset_generic;
520 		break;
521 	case e1000_phy_82578:
522 		phy->ops.check_polarity = e1000_check_polarity_m88;
523 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
524 		phy->ops.get_cable_length = e1000_get_cable_length_m88;
525 		phy->ops.get_info = e1000_get_phy_info_m88;
526 		break;
527 	default:
528 		ret_val = -E1000_ERR_PHY;
529 		break;
530 	}
531 
532 	return ret_val;
533 }
534 
535 /**
536  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
537  *  @hw: pointer to the HW structure
538  *
539  *  Initialize family-specific PHY parameters and function pointers.
540  **/
541 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
542 {
543 	struct e1000_phy_info *phy = &hw->phy;
544 	s32 ret_val;
545 	u16 i = 0;
546 
547 	DEBUGFUNC("e1000_init_phy_params_ich8lan");
548 
549 	phy->addr		= 1;
550 	phy->reset_delay_us	= 100;
551 
552 	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
553 	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
554 	phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
555 	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
556 	phy->ops.read_reg	= e1000_read_phy_reg_igp;
557 	phy->ops.release	= e1000_release_swflag_ich8lan;
558 	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
559 	phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
560 	phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
561 	phy->ops.write_reg	= e1000_write_phy_reg_igp;
562 	phy->ops.power_up	= e1000_power_up_phy_copper;
563 	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;
564 
565 	/* We may need to do this twice - once for IGP and if that fails,
566 	 * we'll set BM func pointers and try again
567 	 */
568 	ret_val = e1000_determine_phy_address(hw);
569 	if (ret_val) {
570 		phy->ops.write_reg = e1000_write_phy_reg_bm;
571 		phy->ops.read_reg  = e1000_read_phy_reg_bm;
572 		ret_val = e1000_determine_phy_address(hw);
573 		if (ret_val) {
574 			DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
575 			return ret_val;
576 		}
577 	}
578 
579 	phy->id = 0;
580 	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
581 	       (i++ < 100)) {
582 		msec_delay(1);
583 		ret_val = e1000_get_phy_id(hw);
584 		if (ret_val)
585 			return ret_val;
586 	}
587 
588 	/* Verify phy id */
589 	switch (phy->id) {
590 	case IGP03E1000_E_PHY_ID:
591 		phy->type = e1000_phy_igp_3;
592 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
593 		phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
594 		phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
595 		phy->ops.get_info = e1000_get_phy_info_igp;
596 		phy->ops.check_polarity = e1000_check_polarity_igp;
597 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
598 		break;
599 	case IFE_E_PHY_ID:
600 	case IFE_PLUS_E_PHY_ID:
601 	case IFE_C_E_PHY_ID:
602 		phy->type = e1000_phy_ife;
603 		phy->autoneg_mask = E1000_ALL_NOT_GIG;
604 		phy->ops.get_info = e1000_get_phy_info_ife;
605 		phy->ops.check_polarity = e1000_check_polarity_ife;
606 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
607 		break;
608 	case BME1000_E_PHY_ID:
609 		phy->type = e1000_phy_bm;
610 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
611 		phy->ops.read_reg = e1000_read_phy_reg_bm;
612 		phy->ops.write_reg = e1000_write_phy_reg_bm;
613 		phy->ops.commit = e1000_phy_sw_reset_generic;
614 		phy->ops.get_info = e1000_get_phy_info_m88;
615 		phy->ops.check_polarity = e1000_check_polarity_m88;
616 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
617 		break;
618 	default:
619 		return -E1000_ERR_PHY;
620 		break;
621 	}
622 
623 	return E1000_SUCCESS;
624 }
625 
626 /**
627  *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
628  *  @hw: pointer to the HW structure
629  *
630  *  Initialize family-specific NVM parameters and function
631  *  pointers.
632  **/
633 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
634 {
635 	struct e1000_nvm_info *nvm = &hw->nvm;
636 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
637 	u32 gfpreg, sector_base_addr, sector_end_addr;
638 	u16 i;
639 	u32 nvm_size;
640 
641 	DEBUGFUNC("e1000_init_nvm_params_ich8lan");
642 
643 	nvm->type = e1000_nvm_flash_sw;
644 
645 	if (hw->mac.type >= e1000_pch_spt) {
646 		/* In SPT, gfpreg doesn't exist. NVM size is taken from the
647 		 * STRAP register. This is because in SPT the GbE Flash region
648 		 * is no longer accessed through the flash registers. Instead,
649 		 * the mechanism has changed, and the Flash region access
650 		 * registers are now implemented in GbE memory space.
651 		 */
652 		nvm->flash_base_addr = 0;
653 		nvm_size =
654 		    (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
655 		    * NVM_SIZE_MULTIPLIER;
656 		nvm->flash_bank_size = nvm_size / 2;
657 		/* Adjust to word count */
658 		nvm->flash_bank_size /= sizeof(u16);
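		/* Worked example (hypothetical strap value): if bits 5:1 of
		 * E1000_STRAP read 0x0F, nvm_size = (0x0F + 1) *
		 * NVM_SIZE_MULTIPLIER; assuming a 4 KB multiplier, that is
		 * 64 KB in total, i.e. a 32 KB bank, or 16K 16-bit words
		 * after the two divisions above.
		 */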
659 		/* Set the base address for flash register access */
660 		hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
661 	} else {
662 		/* Can't read flash registers if register set isn't mapped. */
663 		if (!hw->flash_address) {
664 			DEBUGOUT("ERROR: Flash registers not mapped\n");
665 			return -E1000_ERR_CONFIG;
666 		}
667 
668 		gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
669 
670 		/* sector_X_addr is a "sector"-aligned address (4096 bytes)
671 		 * Add 1 to sector_end_addr since this sector is included in
672 		 * the overall size.
673 		 */
674 		sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
675 		sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
676 
677 		/* flash_base_addr is byte-aligned */
678 		nvm->flash_base_addr = sector_base_addr
679 				       << FLASH_SECTOR_ADDR_SHIFT;
680 
681 		/* find total size of the NVM, then cut in half since the total
682 		 * size represents two separate NVM banks.
683 		 */
684 		nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
685 					<< FLASH_SECTOR_ADDR_SHIFT);
686 		nvm->flash_bank_size /= 2;
687 		/* Adjust to word count */
688 		nvm->flash_bank_size /= sizeof(u16);
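		/* Worked example (hypothetical GFPREG value): gfpreg =
		 * 0x000A0003 gives sector_base_addr = 3 and sector_end_addr
		 * = 0xA + 1 = 0xB, so flash_base_addr = 3 << 12 = 0x3000 and
		 * both banks together span (0xB - 3) << 12 = 32 KB; each
		 * bank is therefore 16 KB, i.e. 8K 16-bit words.
		 */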
689 	}
690 
691 	nvm->word_size = E1000_SHADOW_RAM_WORDS;
692 
693 	/* Clear shadow ram */
694 	for (i = 0; i < nvm->word_size; i++) {
695 		dev_spec->shadow_ram[i].modified = FALSE;
696 		dev_spec->shadow_ram[i].value    = 0xFFFF;
697 	}
698 
699 	/* Function Pointers */
700 	nvm->ops.acquire	= e1000_acquire_nvm_ich8lan;
701 	nvm->ops.release	= e1000_release_nvm_ich8lan;
702 	if (hw->mac.type >= e1000_pch_spt) {
703 		nvm->ops.read	= e1000_read_nvm_spt;
704 		nvm->ops.update	= e1000_update_nvm_checksum_spt;
705 	} else {
706 		nvm->ops.read	= e1000_read_nvm_ich8lan;
707 		nvm->ops.update	= e1000_update_nvm_checksum_ich8lan;
708 	}
709 	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
710 	nvm->ops.validate	= e1000_validate_nvm_checksum_ich8lan;
711 	nvm->ops.write		= e1000_write_nvm_ich8lan;
712 
713 	return E1000_SUCCESS;
714 }
715 
716 /**
717  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
718  *  @hw: pointer to the HW structure
719  *
720  *  Initialize family-specific MAC parameters and function
721  *  pointers.
722  **/
723 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
724 {
725 	struct e1000_mac_info *mac = &hw->mac;
726 
727 	DEBUGFUNC("e1000_init_mac_params_ich8lan");
728 
729 	/* Set media type function pointer */
730 	hw->phy.media_type = e1000_media_type_copper;
731 
732 	/* Set mta register count */
733 	mac->mta_reg_count = 32;
734 	/* Set rar entry count */
735 	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
736 	if (mac->type == e1000_ich8lan)
737 		mac->rar_entry_count--;
738 	/* Set if part includes ASF firmware */
739 	mac->asf_firmware_present = TRUE;
740 	/* FWSM register */
741 	mac->has_fwsm = TRUE;
742 	/* ARC subsystem not supported */
743 	mac->arc_subsystem_valid = FALSE;
744 	/* Adaptive IFS supported */
745 	mac->adaptive_ifs = TRUE;
746 
747 	/* Function pointers */
748 
749 	/* bus type/speed/width */
750 	mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
751 	/* function id */
752 	mac->ops.set_lan_id = e1000_set_lan_id_single_port;
753 	/* reset */
754 	mac->ops.reset_hw = e1000_reset_hw_ich8lan;
755 	/* hw initialization */
756 	mac->ops.init_hw = e1000_init_hw_ich8lan;
757 	/* link setup */
758 	mac->ops.setup_link = e1000_setup_link_ich8lan;
759 	/* physical interface setup */
760 	mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
761 	/* check for link */
762 	mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
763 	/* link info */
764 	mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
765 	/* multicast address update */
766 	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
767 	/* clear hardware counters */
768 	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
769 
770 	/* LED and other operations */
771 	switch (mac->type) {
772 	case e1000_ich8lan:
773 	case e1000_ich9lan:
774 	case e1000_ich10lan:
775 		/* check management mode */
776 		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
777 		/* ID LED init */
778 		mac->ops.id_led_init = e1000_id_led_init_generic;
779 		/* blink LED */
780 		mac->ops.blink_led = e1000_blink_led_generic;
781 		/* setup LED */
782 		mac->ops.setup_led = e1000_setup_led_generic;
783 		/* cleanup LED */
784 		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
785 		/* turn on/off LED */
786 		mac->ops.led_on = e1000_led_on_ich8lan;
787 		mac->ops.led_off = e1000_led_off_ich8lan;
788 		break;
789 	case e1000_pch2lan:
790 		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
791 		mac->ops.rar_set = e1000_rar_set_pch2lan;
792 		/* fall-through */
793 	case e1000_pch_lpt:
794 	case e1000_pch_spt:
795 	case e1000_pch_cnp:
796 		/* multicast address update for pch2 */
797 		mac->ops.update_mc_addr_list =
798 			e1000_update_mc_addr_list_pch2lan;
799 		/* fall-through */
800 	case e1000_pchlan:
801 		/* check management mode */
802 		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
803 		/* ID LED init */
804 		mac->ops.id_led_init = e1000_id_led_init_pchlan;
805 		/* setup LED */
806 		mac->ops.setup_led = e1000_setup_led_pchlan;
807 		/* cleanup LED */
808 		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
809 		/* turn on/off LED */
810 		mac->ops.led_on = e1000_led_on_pchlan;
811 		mac->ops.led_off = e1000_led_off_pchlan;
812 		break;
813 	default:
814 		break;
815 	}
816 
817 	if (mac->type >= e1000_pch_lpt) {
818 		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
819 		mac->ops.rar_set = e1000_rar_set_pch_lpt;
820 		mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
821 		mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt;
822 	}
823 
824 	/* Enable PCS Lock-loss workaround for ICH8 */
825 	if (mac->type == e1000_ich8lan)
826 		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
827 
828 	return E1000_SUCCESS;
829 }
830 
831 /**
832  *  __e1000_access_emi_reg_locked - Read/write EMI register
833  *  @hw: pointer to the HW structure
834  *  @address: EMI address to program
835  *  @data: pointer to value to read/write from/to the EMI address
836  *  @read: boolean flag to indicate read or write
837  *
838  *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
839  **/
840 static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
841 					 u16 *data, bool read)
842 {
843 	s32 ret_val;
844 
845 	DEBUGFUNC("__e1000_access_emi_reg_locked");
846 
847 	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
848 	if (ret_val)
849 		return ret_val;
850 
851 	if (read)
852 		ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
853 						      data);
854 	else
855 		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
856 						       *data);
857 
858 	return ret_val;
859 }
860 
861 /**
862  *  e1000_read_emi_reg_locked - Read Extended Management Interface register
863  *  @hw: pointer to the HW structure
864  *  @addr: EMI address to program
865  *  @data: value to be read from the EMI address
866  *
867  *  Assumes the SW/FW/HW Semaphore is already acquired.
868  **/
869 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
870 {
871 	DEBUGFUNC("e1000_read_emi_reg_locked");
872 
873 	return __e1000_access_emi_reg_locked(hw, addr, data, TRUE);
874 }
875 
876 /**
877  *  e1000_write_emi_reg_locked - Write Extended Management Interface register
878  *  @hw: pointer to the HW structure
879  *  @addr: EMI address to program
880  *  @data: value to be written to the EMI address
881  *
882  *  Assumes the SW/FW/HW Semaphore is already acquired.
883  **/
884 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
885 {
886 	DEBUGFUNC("e1000_write_emi_reg_locked");
887 
888 	return __e1000_access_emi_reg_locked(hw, addr, &data, FALSE);
889 }
890 
891 /**
892  *  e1000_set_eee_pchlan - Enable/disable EEE support
893  *  @hw: pointer to the HW structure
894  *
895  *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
896  *  the link and the EEE capabilities of the link partner.  The LPI Control
897  *  register bits will remain set only if/when link is up.
898  *
899  *  EEE LPI must not be asserted earlier than one second after link is up.
900  *  On 82579, EEE LPI should not be enabled until then; otherwise there
901  *  can be link issues with some switches.  Other devices can have EEE LPI
902  *  enabled immediately upon link up since they have a timer in hardware which
903  *  prevents LPI from being asserted too early.
904  **/
905 s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
906 {
907 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
908 	s32 ret_val;
909 	u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
910 
911 	DEBUGFUNC("e1000_set_eee_pchlan");
912 
913 	switch (hw->phy.type) {
914 	case e1000_phy_82579:
915 		lpa = I82579_EEE_LP_ABILITY;
916 		pcs_status = I82579_EEE_PCS_STATUS;
917 		adv_addr = I82579_EEE_ADVERTISEMENT;
918 		break;
919 	case e1000_phy_i217:
920 		lpa = I217_EEE_LP_ABILITY;
921 		pcs_status = I217_EEE_PCS_STATUS;
922 		adv_addr = I217_EEE_ADVERTISEMENT;
923 		break;
924 	default:
925 		return E1000_SUCCESS;
926 	}
927 
928 	ret_val = hw->phy.ops.acquire(hw);
929 	if (ret_val)
930 		return ret_val;
931 
932 	ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
933 	if (ret_val)
934 		goto release;
935 
936 	/* Clear bits that enable EEE in various speeds */
937 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
938 
939 	/* Enable EEE if not disabled by user */
940 	if (!dev_spec->eee_disable) {
941 		/* Save off link partner's EEE ability */
942 		ret_val = e1000_read_emi_reg_locked(hw, lpa,
943 						    &dev_spec->eee_lp_ability);
944 		if (ret_val)
945 			goto release;
946 
947 		/* Read EEE advertisement */
948 		ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
949 		if (ret_val)
950 			goto release;
951 
952 		/* Enable EEE only for speeds in which the link partner is
953 		 * EEE capable and for which we advertise EEE.
954 		 */
955 		if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
956 			lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
957 
958 		if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
959 			hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
960 			if (data & NWAY_LPAR_100TX_FD_CAPS)
961 				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
962 			else
963 				/* EEE is not supported in 100Half, so ignore
964 				 * partner's EEE in 100 ability if full-duplex
965 				 * is not advertised.
966 				 */
967 				dev_spec->eee_lp_ability &=
968 				    ~I82579_EEE_100_SUPPORTED;
969 		}
970 	}
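	/* Example of the masking above (hypothetical values): if both sides
	 * advertise EEE at 100 and 1000 Mbps but the partner only reports
	 * 100Half in PHY_LP_ABILITY, only I82579_LPI_CTRL_1000_ENABLE is
	 * set and the partner's 100 Mbps EEE ability is ignored.
	 */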
971 
972 	if (hw->phy.type == e1000_phy_82579) {
973 		ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
974 						    &data);
975 		if (ret_val)
976 			goto release;
977 
978 		data &= ~I82579_LPI_100_PLL_SHUT;
979 		ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
980 						     data);
981 	}
982 
983 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
984 	ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
985 	if (ret_val)
986 		goto release;
987 
988 	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
989 release:
990 	hw->phy.ops.release(hw);
991 
992 	return ret_val;
993 }
994 
995 /**
996  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
997  *  @hw:   pointer to the HW structure
998  *  @link: link up bool flag
999  *
1000  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
1001  *  preventing further DMA write requests.  Workaround the issue by disabling
1002  *  preventing further DMA write requests.  Work around the issue by disabling
1003  *  the de-assertion of the clock request when in 1Gbps mode.
1004  *  speeds in order to avoid Tx hangs.
1005  **/
1006 static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
1007 {
1008 	u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1009 	u32 status = E1000_READ_REG(hw, E1000_STATUS);
1010 	s32 ret_val = E1000_SUCCESS;
1011 	u16 reg;
1012 
1013 	if (link && (status & E1000_STATUS_SPEED_1000)) {
1014 		ret_val = hw->phy.ops.acquire(hw);
1015 		if (ret_val)
1016 			return ret_val;
1017 
1018 		ret_val =
1019 		    e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1020 					       &reg);
1021 		if (ret_val)
1022 			goto release;
1023 
1024 		ret_val =
1025 		    e1000_write_kmrn_reg_locked(hw,
1026 						E1000_KMRNCTRLSTA_K1_CONFIG,
1027 						reg &
1028 						~E1000_KMRNCTRLSTA_K1_ENABLE);
1029 		if (ret_val)
1030 			goto release;
1031 
1032 		usec_delay(10);
1033 
1034 		E1000_WRITE_REG(hw, E1000_FEXTNVM6,
1035 				fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
1036 
1037 		ret_val =
1038 		    e1000_write_kmrn_reg_locked(hw,
1039 						E1000_KMRNCTRLSTA_K1_CONFIG,
1040 						reg);
1041 release:
1042 		hw->phy.ops.release(hw);
1043 	} else {
1044 		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
1045 		fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
1046 
1047 		if ((hw->phy.revision > 5) || !link ||
1048 		    ((status & E1000_STATUS_SPEED_100) &&
1049 		     (status & E1000_STATUS_FD)))
1050 			goto update_fextnvm6;
1051 
1052 		ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
1053 		if (ret_val)
1054 			return ret_val;
1055 
1056 		/* Clear link status transmit timeout */
1057 		reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
1058 
1059 		if (status & E1000_STATUS_SPEED_100) {
1060 			/* Set inband Tx timeout to 5x10us for 100Half */
1061 			reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1062 
1063 			/* Do not extend the K1 entry latency for 100Half */
1064 			fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1065 		} else {
1066 			/* Set inband Tx timeout to 50x10us for 10Full/Half */
1067 			reg |= 50 <<
1068 			       I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1069 
1070 			/* Extend the K1 entry latency for 10 Mbps */
1071 			fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1072 		}
1073 
1074 		ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
1075 		if (ret_val)
1076 			return ret_val;
1077 
1078 update_fextnvm6:
1079 		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1080 	}
1081 
1082 	return ret_val;
1083 }
1084 
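/**
 *  e1000_ltr2ns - Convert an LTR-encoded latency to nanoseconds
 *  @ltr: latency encoded as a 10-bit value and a 3-bit scale
 *
 *  Decodes value * 2^(scale * E1000_LTRV_SCALE_FACTOR) ns.  Assuming the
 *  usual 10-bit-value/scale-factor-5 encoding, e.g. ltr = 0x08B9 (value
 *  185, scale 2) decodes to 185 * 1024 = 189440 ns.
 **/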
1085 static u64 e1000_ltr2ns(u16 ltr)
1086 {
1087 	u32 value, scale;
1088 
1089 	/* Determine the latency in nsec based on the LTR value & scale */
1090 	value = ltr & E1000_LTRV_VALUE_MASK;
1091 	scale = (ltr & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT;
1092 
1093 	return value * (1ULL << (scale * E1000_LTRV_SCALE_FACTOR));
1094 }
1095 
1096 /**
1097  *  e1000_platform_pm_pch_lpt - Set platform power management values
1098  *  @hw: pointer to the HW structure
1099  *  @link: bool indicating link status
1100  *
1101  *  Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
1102  *  GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
1103  *  when link is up (which must not exceed the maximum latency supported
1104  *  by the platform), otherwise specify there is no LTR requirement.
1105  *  Unlike true PCIe devices, which set the LTR maximum snoop/no-snoop
1106  *  latencies in the LTR Extended Capability Structure in the PCIe Extended
1107  *  Capability register set, on this device LTR is set by writing the
1108  *  equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
1109  *  setting the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
1110  *  message to the PMC.
1111  *
1112  *  Use the LTR value to calculate the Optimized Buffer Flush/Fill (OBFF)
1113  *  high-water mark.
1114  **/
1115 static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
1116 {
1117 	u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
1118 		  link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
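	/* The initializer above sets the latency-requirement flag in both
	 * the snoop and no-snoop halves of LTRV only while link is up, and
	 * always sets SEND so the new values are pushed to the PMC.
	 */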
1119 	u16 lat_enc = 0;	/* latency encoded */
1120 	s32 obff_hwm = 0;
1121 
1122 	DEBUGFUNC("e1000_platform_pm_pch_lpt");
1123 
1124 	if (link) {
1125 		u16 speed, duplex, scale = 0;
1126 		u16 max_snoop, max_nosnoop;
1127 		u16 max_ltr_enc;	/* max LTR latency encoded */
1128 		s64 lat_ns;
1129 		s64 value;
1130 		u32 rxa;
1131 
1132 		if (!hw->mac.max_frame_size) {
1133 			DEBUGOUT("max_frame_size not set.\n");
1134 			return -E1000_ERR_CONFIG;
1135 		}
1136 
1137 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
1138 		if (!speed) {
1139 			DEBUGOUT("Speed not set.\n");
1140 			return -E1000_ERR_CONFIG;
1141 		}
1142 
1143 		/* Rx Packet Buffer Allocation size (KB) */
1144 		rxa = E1000_READ_REG(hw, E1000_PBA) & E1000_PBA_RXA_MASK;
1145 
1146 		/* Determine the maximum latency tolerated by the device.
1147 		 *
1148 		 * Per the PCIe spec, the tolerated latencies are encoded as
1149 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
1150 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
1151 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
1152 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
1153 		 */
1154 		lat_ns = ((s64)rxa * 1024 -
1155 			  (2 * (s64)hw->mac.max_frame_size)) * 8 * 1000;
1156 		if (lat_ns < 0)
1157 			lat_ns = 0;
1158 		else
1159 			lat_ns /= speed;
1160 		value = lat_ns;
1161 
1162 		while (value > E1000_LTRV_VALUE_MASK) {
1163 			scale++;
1164 			value = E1000_DIVIDE_ROUND_UP(value, (1 << 5));
1165 		}
1166 		if (scale > E1000_LTRV_SCALE_MAX) {
1167 			DEBUGOUT1("Invalid LTR latency scale %d\n", scale);
1168 			return -E1000_ERR_CONFIG;
1169 		}
1170 		lat_enc = (u16)((scale << E1000_LTRV_SCALE_SHIFT) | value);
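		/* Worked example (hypothetical sizes): rxa = 26 KB and
		 * max_frame_size = 1522 at 1000 Mbps give lat_ns =
		 * (26 * 1024 - 2 * 1522) * 8 * 1000 / 1000 = 188640 ns.
		 * That exceeds the 10-bit value mask, so it is divided by
		 * 2^5 twice: scale = 2, value = 185 (rounded up), and
		 * lat_enc = 0x08B9.
		 */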
1171 
1172 		/* Determine the maximum latency tolerated by the platform */
1173 		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT, &max_snoop);
1174 		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
1175 		max_ltr_enc = E1000_MAX(max_snoop, max_nosnoop);
1176 
1177 		if (lat_enc > max_ltr_enc) {
1178 			lat_enc = max_ltr_enc;
1179 			lat_ns = e1000_ltr2ns(max_ltr_enc);
1180 		}
1181 
1182 		if (lat_ns) {
1183 			lat_ns *= speed * 1000;
1184 			lat_ns /= 8;
1185 			lat_ns /= 1000000000;
1186 			obff_hwm = (s32)(rxa - lat_ns);
1187 		}
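		/* Continuing the example: 188640 ns at 1000 Mbps works out
		 * to 23 KB of data in flight, leaving obff_hwm = 26 - 23 =
		 * 3 KB of Rx buffer headroom.
		 */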
1188 		if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) {
1189 			DEBUGOUT1("Invalid high water mark %d\n", obff_hwm);
1190 			return -E1000_ERR_CONFIG;
1191 		}
1192 	}
1193 
1194 	/* Set Snoop and No-Snoop latencies the same */
1195 	reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
1196 	E1000_WRITE_REG(hw, E1000_LTRV, reg);
1197 
1198 	/* Set OBFF high water mark */
1199 	reg = E1000_READ_REG(hw, E1000_SVT) & ~E1000_SVT_OFF_HWM_MASK;
1200 	reg |= obff_hwm;
1201 	E1000_WRITE_REG(hw, E1000_SVT, reg);
1202 
1203 	/* Enable OBFF */
1204 	reg = E1000_READ_REG(hw, E1000_SVCR);
1205 	reg |= E1000_SVCR_OFF_EN;
1206 	/* Always unblock interrupts to the CPU even when the system is
1207 	 * in OBFF mode. This ensures that small round-robin traffic
1208 	 * (like ping) does not get dropped or experience long latency.
1209 	 */
1210 	reg |= E1000_SVCR_OFF_MASKINT;
1211 	E1000_WRITE_REG(hw, E1000_SVCR, reg);
1212 
1213 	return E1000_SUCCESS;
1214 }
1215 
1216 /**
1217  *  e1000_set_obff_timer_pch_lpt - Update Optimized Buffer Flush/Fill timer
1218  *  @hw: pointer to the HW structure
1219  *  @itr: interrupt throttling rate
1220  *
1221  *  Configure OBFF with the updated interrupt rate.
1222  **/
1223 static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr)
1224 {
1225 	u32 svcr;
1226 	s32 timer;
1227 
1228 	DEBUGFUNC("e1000_set_obff_timer_pch_lpt");
1229 
1230 	/* Convert ITR value into microseconds for OBFF timer */
1231 	timer = itr & E1000_ITR_MASK;
1232 	timer = (timer * E1000_ITR_MULT) / 1000;
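	/* E.g., assuming a 256 ns ITR granularity (E1000_ITR_MULT == 256),
	 * itr = 500 converts to 500 * 256 / 1000 = 128 usec for the OBFF
	 * timer.
	 */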
1233 
1234 	if ((timer < 0) || (timer > E1000_ITR_MASK)) {
1235 		DEBUGOUT1("Invalid OBFF timer %d\n", timer);
1236 		return -E1000_ERR_CONFIG;
1237 	}
1238 
1239 	svcr = E1000_READ_REG(hw, E1000_SVCR);
1240 	svcr &= ~E1000_SVCR_OFF_TIMER_MASK;
1241 	svcr |= timer << E1000_SVCR_OFF_TIMER_SHIFT;
1242 	E1000_WRITE_REG(hw, E1000_SVCR, svcr);
1243 
1244 	return E1000_SUCCESS;
1245 }
1246 
1247 /**
1248  *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1249  *  @hw: pointer to the HW structure
1250  *  @to_sx: boolean indicating a system power state transition to Sx
1251  *
1252  *  When link is down, configure ULP mode to significantly reduce the power
1253  *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
1254  *  ME firmware to start the ULP configuration.  If not on an ME enabled
1255  *  system, configure the ULP mode by software.
1256  **/
1257 s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1258 {
1259 	u32 mac_reg;
1260 	s32 ret_val = E1000_SUCCESS;
1261 	u16 phy_reg;
1262 	u16 oem_reg = 0;
1263 
1264 	if ((hw->mac.type < e1000_pch_lpt) ||
1265 	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1266 	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1267 	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1268 	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1269 	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1270 		return 0;
1271 
1272 	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1273 		/* Request ME configure ULP mode in the PHY */
1274 		mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1275 		mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1276 		E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1277 
1278 		goto out;
1279 	}
1280 
1281 	if (!to_sx) {
1282 		int i = 0;
1283 
1284 		/* Poll up to 5 seconds for Cable Disconnected indication */
1285 		while (!(E1000_READ_REG(hw, E1000_FEXT) &
1286 			 E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1287 			/* Bail if link is re-acquired */
1288 			if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
1289 				return -E1000_ERR_PHY;
1290 
1291 			if (i++ == 100)
1292 				break;
1293 
1294 			msec_delay(50);
1295 		}
1296 		DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
1297 			 (E1000_READ_REG(hw, E1000_FEXT) &
1298 			  E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
1299 			 i * 50);
1300 	}
1301 
1302 	ret_val = hw->phy.ops.acquire(hw);
1303 	if (ret_val)
1304 		goto out;
1305 
1306 	/* Force SMBus mode in PHY */
1307 	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1308 	if (ret_val)
1309 		goto release;
1310 	phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1311 	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1312 
1313 	/* Force SMBus mode in MAC */
1314 	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1315 	mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1316 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1317 
1318 	/* Si workaround for ULP entry flow on i217/rev6 h/w.  Enable
1319 	 * LPLU and disable Gig speed when entering ULP
1320 	 */
1321 	if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
1322 		ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
1323 						       &oem_reg);
1324 		if (ret_val)
1325 			goto release;
1326 
1327 		phy_reg = oem_reg;
1328 		phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
1329 
1330 		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1331 							phy_reg);
1332 
1333 		if (ret_val)
1334 			goto release;
1335 	}
1336 
1337 	/* Set Inband ULP Exit, Reset to SMBus mode and
1338 	 * Disable SMBus Release on PERST# in PHY
1339 	 */
1340 	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1341 	if (ret_val)
1342 		goto release;
1343 	phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1344 		    I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1345 	if (to_sx) {
1346 		if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
1347 			phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1348 		else
1349 			phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1350 
1351 		phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1352 		phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
1353 	} else {
1354 		phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1355 		phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
1356 		phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1357 	}
1358 	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1359 
1360 	/* Set Disable SMBus Release on PERST# in MAC */
1361 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1362 	mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1363 	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1364 
1365 	/* Commit ULP changes in PHY by starting auto ULP configuration */
1366 	phy_reg |= I218_ULP_CONFIG1_START;
1367 	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1368 
1369 	if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
1370 	    to_sx && (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
1371 		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1372 							oem_reg);
1373 		if (ret_val)
1374 			goto release;
1375 	}
1376 
1377 release:
1378 	hw->phy.ops.release(hw);
1379 out:
1380 	if (ret_val)
1381 		DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
1382 	else
1383 		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1384 
1385 	return ret_val;
1386 }
1387 
1388 /**
1389  *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1390  *  @hw: pointer to the HW structure
1391  *  @force: boolean indicating whether or not to force disabling ULP
1392  *
1393  *  Un-configure ULP mode when link is up, the system is transitioned from
1394  *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1395  *  system, poll for an indication from ME that ULP has been un-configured.
1396  *  If not on an ME enabled system, un-configure the ULP mode by software.
1397  *
1398  *  During nominal operation, this function is called when link is acquired
1399  *  to disable ULP mode (force=FALSE); otherwise, for example when unloading
1400  *  the driver or during Sx->S0 transitions, this is called with force=TRUE
1401  *  to forcibly disable ULP.
1402  **/
1403 s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1404 {
1405 	s32 ret_val = E1000_SUCCESS;
1406 	u8 ulp_exit_timeout = 30;
1407 	u32 mac_reg;
1408 	u16 phy_reg;
1409 	int i = 0;
1410 
1411 	if ((hw->mac.type < e1000_pch_lpt) ||
1412 	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1413 	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1414 	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1415 	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1416 	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1417 		return 0;
1418 
1419 	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1420 		if (force) {
1421 			/* Request ME un-configure ULP mode in the PHY */
1422 			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1423 			mac_reg &= ~E1000_H2ME_ULP;
1424 			mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1425 			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1426 		}
1427 
1428 		if (hw->mac.type == e1000_pch_cnp)
1429 			ulp_exit_timeout = 100;
1430 
1431 		while (E1000_READ_REG(hw, E1000_FWSM) &
1432 		       E1000_FWSM_ULP_CFG_DONE) {
1433 			if (i++ == ulp_exit_timeout) {
1434 				ret_val = -E1000_ERR_PHY;
1435 				goto out;
1436 			}
1437 
1438 			msec_delay(10);
1439 		}
1440 		DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1441 
1442 		if (force) {
1443 			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1444 			mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1445 			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1446 		} else {
1447 			/* Clear H2ME.ULP after ME ULP configuration */
1448 			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1449 			mac_reg &= ~E1000_H2ME_ULP;
1450 			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1451 		}
1452 
1453 		goto out;
1454 	}
1455 
1456 	ret_val = hw->phy.ops.acquire(hw);
1457 	if (ret_val)
1458 		goto out;
1459 
1460 	if (force)
1461 		/* Toggle LANPHYPC Value bit */
1462 		e1000_toggle_lanphypc_pch_lpt(hw);
1463 
1464 	/* Unforce SMBus mode in PHY */
1465 	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1466 	if (ret_val) {
1467 		/* The MAC might be in PCIe mode, so temporarily force to
1468 		 * SMBus mode in order to access the PHY.
1469 		 */
1470 		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1471 		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1472 		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1473 
1474 		msec_delay(50);
1475 
1476 		ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1477 						       &phy_reg);
1478 		if (ret_val)
1479 			goto release;
1480 	}
1481 	phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1482 	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1483 
1484 	/* Unforce SMBus mode in MAC */
1485 	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1486 	mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1487 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1488 
1489 	/* When ULP mode was previously entered, K1 was disabled by the
1490 	 * hardware.  Re-Enable K1 in the PHY when exiting ULP.
1491 	 */
1492 	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1493 	if (ret_val)
1494 		goto release;
1495 	phy_reg |= HV_PM_CTRL_K1_ENABLE;
1496 	e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1497 
1498 	/* Clear ULP enabled configuration */
1499 	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1500 	if (ret_val)
1501 		goto release;
1502 	phy_reg &= ~(I218_ULP_CONFIG1_IND |
1503 		     I218_ULP_CONFIG1_STICKY_ULP |
1504 		     I218_ULP_CONFIG1_RESET_TO_SMBUS |
1505 		     I218_ULP_CONFIG1_WOL_HOST |
1506 		     I218_ULP_CONFIG1_INBAND_EXIT |
1507 		     I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
1508 		     I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
1509 		     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1510 	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1511 
1512 	/* Commit ULP changes by starting auto ULP configuration */
1513 	phy_reg |= I218_ULP_CONFIG1_START;
1514 	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1515 
1516 	/* Clear Disable SMBus Release on PERST# in MAC */
1517 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1518 	mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1519 	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1520 
1521 release:
1522 	hw->phy.ops.release(hw);
1523 	if (force) {
1524 		hw->phy.ops.reset(hw);
1525 		msec_delay(50);
1526 	}
1527 out:
1528 	if (ret_val)
1529 		DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
1530 	else
1531 		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1532 
1533 	return ret_val;
1534 }
1535 
1536 
1537 
1538 /**
1539  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1540  *  @hw: pointer to the HW structure
1541  *
1542  *  Checks to see if the link status of the hardware has changed.  If a
1543  *  change in link status has been detected, then we read the PHY registers
1544  *  to get the current speed/duplex if link exists.
1545  **/
1546 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1547 {
1548 	struct e1000_mac_info *mac = &hw->mac;
1549 	s32 ret_val, tipg_reg = 0;
1550 	u16 emi_addr, emi_val = 0;
1551 	bool link;
1552 	u16 phy_reg;
1553 
1554 	DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1555 
1556 	/* We only want to go out to the PHY registers to see if Auto-Neg
1557 	 * has completed and/or if our link status has changed.  The
1558 	 * get_link_status flag is set upon receiving a Link Status
1559 	 * Change or Rx Sequence Error interrupt.
1560 	 */
1561 	if (!mac->get_link_status)
1562 		return E1000_SUCCESS;
1563 
1564 	/* First we want to see if the MII Status Register reports
1565 	 * link.  If so, then we want to get the current speed/duplex
1566 	 * of the PHY.
1567 	 */
1568 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1569 	if (ret_val)
1570 		return ret_val;
1571 
1572 	if (hw->mac.type == e1000_pchlan) {
1573 		ret_val = e1000_k1_gig_workaround_hv(hw, link);
1574 		if (ret_val)
1575 			return ret_val;
1576 	}
1577 
1578 	/* When connected at 10Mbps half-duplex, some parts are excessively
1579 	 * aggressive resulting in many collisions. To avoid this, increase
1580 	 * the IPG and reduce Rx latency in the PHY.
1581 	 */
1582 	if ((hw->mac.type >= e1000_pch2lan) && link) {
1583 		u16 speed, duplex;
1584 
1585 		e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
1586 		tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
1587 		tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1588 
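		/* IPGT encodings used below: 0xFF stretches the transmit
		 * IPG to its maximum for the 10/half collision problem,
		 * 0xC is an intermediate value for full-duplex sub-gigabit
		 * links on SPT and later, and 0x08 is the hardware default.
		 */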
1589 		if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1590 			tipg_reg |= 0xFF;
1591 			/* Reduce Rx latency in analog PHY */
1592 			emi_val = 0;
1593 		} else if (hw->mac.type >= e1000_pch_spt &&
1594 			   duplex == FULL_DUPLEX && speed != SPEED_1000) {
1595 			tipg_reg |= 0xC;
1596 			emi_val = 1;
1597 		} else {
1598 			/* Roll back the default values */
1599 			tipg_reg |= 0x08;
1600 			emi_val = 1;
1601 		}
1602 
1603 		E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);
1604 
1605 		ret_val = hw->phy.ops.acquire(hw);
1606 		if (ret_val)
1607 			return ret_val;
1608 
1609 		if (hw->mac.type == e1000_pch2lan)
1610 			emi_addr = I82579_RX_CONFIG;
1611 		else
1612 			emi_addr = I217_RX_CONFIG;
1613 		ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1614 
1616 		if (hw->mac.type >= e1000_pch_lpt) {
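			/* Adjust PHY PLL clock gating: the larger gate
			 * value (0x3E8) is applied at 10/100, the smaller
			 * one (0xFA) at other speeds.
			 */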
1617 			hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG,
1618 						    &phy_reg);
1619 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
1620 			if (speed == SPEED_100 || speed == SPEED_10)
1621 				phy_reg |= 0x3E8;
1622 			else
1623 				phy_reg |= 0xFA;
1624 			hw->phy.ops.write_reg_locked(hw,
1625 						     I217_PLL_CLOCK_GATE_REG,
1626 						     phy_reg);
1627 
1628 			if (speed == SPEED_1000) {
1629 				hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
1630 							    &phy_reg);
1631 
1632 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
1633 
1634 				hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
1635 							     phy_reg);
1636 			}
1637 		}
1638 		hw->phy.ops.release(hw);
1639 
1640 		if (ret_val)
1641 			return ret_val;
1642 
1643 		if (hw->mac.type >= e1000_pch_spt) {
1644 			u16 data;
1645 			u16 ptr_gap;
1646 
1647 			if (speed == SPEED_1000) {
1648 				ret_val = hw->phy.ops.acquire(hw);
1649 				if (ret_val)
1650 					return ret_val;
1651 
1652 				ret_val = hw->phy.ops.read_reg_locked(hw,
1653 							      PHY_REG(776, 20),
1654 							      &data);
1655 				if (ret_val) {
1656 					hw->phy.ops.release(hw);
1657 					return ret_val;
1658 				}
1659 
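				/* Bits 11:2 of PHY_REG(776, 20) hold the
				 * packet-buffer pointer gap (cf.
				 * E1000_TX_PTR_GAP in the jumbo frame
				 * workaround); enforce a minimum gap of
				 * 0x18 at 1Gbps.
				 */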
1660 				ptr_gap = (data & (0x3FF << 2)) >> 2;
1661 				if (ptr_gap < 0x18) {
1662 					data &= ~(0x3FF << 2);
1663 					data |= (0x18 << 2);
1664 					ret_val =
1665 						hw->phy.ops.write_reg_locked(hw,
1666 							PHY_REG(776, 20), data);
1667 				}
1668 				hw->phy.ops.release(hw);
1669 				if (ret_val)
1670 					return ret_val;
1671 			} else {
1672 				ret_val = hw->phy.ops.acquire(hw);
1673 				if (ret_val)
1674 					return ret_val;
1675 
1676 				ret_val = hw->phy.ops.write_reg_locked(hw,
1677 							     PHY_REG(776, 20),
1678 							     0xC023);
1679 				hw->phy.ops.release(hw);
1680 				if (ret_val)
1681 					return ret_val;
1682 
1683 			}
1684 		}
1685 	}
1686 
1687 	/* I217 Packet Loss issue:
1688 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
1689 	 * on power up.
1690 	 * Set the Beacon Duration for I217 to 8 usec
1691 	 */
1692 	if (hw->mac.type >= e1000_pch_lpt) {
1693 		u32 mac_reg;
1694 
1695 		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1696 		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1697 		mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1698 		E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1699 	}
1700 
1701 	/* Work-around I218 hang issue */
1702 	if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1703 	    (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1704 	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1705 	    (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1706 		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1707 		if (ret_val)
1708 			return ret_val;
1709 	}
1710 	if (hw->mac.type >= e1000_pch_lpt) {
1711 		/* Set platform power management values for
1712 		 * Latency Tolerance Reporting (LTR)
1713 		 * Optimized Buffer Flush/Fill (OBFF)
1714 		 */
1715 		ret_val = e1000_platform_pm_pch_lpt(hw, link);
1716 		if (ret_val)
1717 			return ret_val;
1718 	}
1719 
1720 	/* Clear link partner's EEE ability */
1721 	hw->dev_spec.ich8lan.eee_lp_ability = 0;
1722 
1723 	if (hw->mac.type >= e1000_pch_lpt) {
1724 		u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1725 
1726 		if (hw->mac.type == e1000_pch_spt) {
1727 			/* FEXTNVM6 K1-off workaround - for SPT only */
1728 			u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);
1729 
1730 			if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
1731 				fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
1732 			else
1733 				fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1734 		}
1735 
1736 		if (hw->dev_spec.ich8lan.disable_k1_off == TRUE)
1737 			fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1738 
1739 		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1740 	}
1741 
1742 	if (!link)
1743 		return E1000_SUCCESS; /* No link detected */
1744 
1745 	mac->get_link_status = FALSE;
1746 
1747 	switch (hw->mac.type) {
1748 	case e1000_pch2lan:
1749 		ret_val = e1000_k1_workaround_lv(hw);
1750 		if (ret_val)
1751 			return ret_val;
1752 		/* fall-thru */
1753 	case e1000_pchlan:
1754 		if (hw->phy.type == e1000_phy_82578) {
1755 			ret_val = e1000_link_stall_workaround_hv(hw);
1756 			if (ret_val)
1757 				return ret_val;
1758 		}
1759 
1760 		/* Workaround for PCHx parts in half-duplex:
1761 		 * Set the number of preambles removed from the packet
1762 		 * when it is passed from the PHY to the MAC to prevent
1763 		 * the MAC from misinterpreting the packet type.
1764 		 */
1765 		hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1766 		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1767 
1768 		if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1769 		    E1000_STATUS_FD)
1770 			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1771 
1772 		hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1773 		break;
1774 	default:
1775 		break;
1776 	}
1777 
1778 	/* Check if there was DownShift; this must be checked
1779 	 * immediately after link-up.
1780 	 */
1781 	e1000_check_downshift_generic(hw);
1782 
1783 	/* Enable/Disable EEE after link up */
1784 	if (hw->phy.type > e1000_phy_82579) {
1785 		ret_val = e1000_set_eee_pchlan(hw);
1786 		if (ret_val)
1787 			return ret_val;
1788 	}
1789 
1790 	/* If we are forcing speed/duplex, then we simply return since
1791 	 * we have already determined whether we have link or not.
1792 	 */
1793 	if (!mac->autoneg)
1794 		return -E1000_ERR_CONFIG;
1795 
1796 	/* Auto-Neg is enabled.  Auto Speed Detection takes care
1797 	 * of MAC speed/duplex configuration.  So we only need to
1798 	 * configure Collision Distance in the MAC.
1799 	 */
1800 	mac->ops.config_collision_dist(hw);
1801 
1802 	/* Configure Flow Control now that Auto-Neg has completed.
1803 	 * First, we need to restore the desired flow control
1804 	 * settings because we may have had to re-autoneg with a
1805 	 * different link partner.
1806 	 */
1807 	ret_val = e1000_config_fc_after_link_up_generic(hw);
1808 	if (ret_val)
1809 		DEBUGOUT("Error configuring flow control\n");
1810 
1811 	return ret_val;
1812 }
1813 
1814 /**
1815  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1816  *  @hw: pointer to the HW structure
1817  *
1818  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1819  **/
1820 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1821 {
1822 	DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1823 
1824 	hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1825 	hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1826 	switch (hw->mac.type) {
1827 	case e1000_ich8lan:
1828 	case e1000_ich9lan:
1829 	case e1000_ich10lan:
1830 		hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1831 		break;
1832 	case e1000_pchlan:
1833 	case e1000_pch2lan:
1834 	case e1000_pch_lpt:
1835 	case e1000_pch_spt:
1836 	case e1000_pch_cnp:
1837 		hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1838 		break;
1839 	default:
1840 		break;
1841 	}
1842 }
1843 
1844 /**
1845  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1846  *  @hw: pointer to the HW structure
1847  *
1848  *  Acquires the mutex for performing NVM operations.
1849  **/
1850 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1851 {
1852 	DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1853 
1854 	return E1000_SUCCESS;
1855 }
1856 
1857 /**
1858  *  e1000_release_nvm_ich8lan - Release NVM mutex
1859  *  @hw: pointer to the HW structure
1860  *
1861  *  Releases the mutex used while performing NVM operations.
1862  **/
1863 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1864 {
1865 	DEBUGFUNC("e1000_release_nvm_ich8lan");
1866 
1867 	return;
1868 }
1869 
1870 /**
1871  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1872  *  @hw: pointer to the HW structure
1873  *
1874  *  Acquires the software control flag for performing PHY and select
1875  *  MAC CSR accesses.
1876  **/
1877 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1878 {
1879 	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1880 	s32 ret_val = E1000_SUCCESS;
1881 
1882 	DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1883 
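	/* First wait (up to PHY_CFG_TIMEOUT ms, in 1 ms steps) for any
	 * other software owner to release the flag.
	 */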
1884 	while (timeout) {
1885 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1886 		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1887 			break;
1888 
1889 		msec_delay_irq(1);
1890 		timeout--;
1891 	}
1892 
1893 	if (!timeout) {
1894 		DEBUGOUT("SW has already locked the resource.\n");
1895 		ret_val = -E1000_ERR_CONFIG;
1896 		goto out;
1897 	}
1898 
1899 	timeout = SW_FLAG_TIMEOUT;
1900 
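	/* Now claim the flag and wait for the hardware to confirm the
	 * grant; firmware or hardware may still own the underlying
	 * semaphore.
	 */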
1901 	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1902 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1903 
1904 	while (timeout) {
1905 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1906 		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1907 			break;
1908 
1909 		msec_delay_irq(1);
1910 		timeout--;
1911 	}
1912 
1913 	if (!timeout) {
1914 		DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x\n",
1915 			  E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1916 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1917 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1918 		ret_val = -E1000_ERR_CONFIG;
1919 		goto out;
1920 	}
1921 
1922 out:
1923 	return ret_val;
1924 }
1925 
1926 /**
1927  *  e1000_release_swflag_ich8lan - Release software control flag
1928  *  @hw: pointer to the HW structure
1929  *
1930  *  Releases the software control flag for performing PHY and select
1931  *  MAC CSR accesses.
1932  **/
1933 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1934 {
1935 	u32 extcnf_ctrl;
1936 
1937 	DEBUGFUNC("e1000_release_swflag_ich8lan");
1938 
1939 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1940 
1941 	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1942 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1943 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1944 	} else {
1945 		DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1946 	}
1947 
1948 	return;
1949 }
1950 
1951 /**
1952  *  e1000_check_mng_mode_ich8lan - Checks management mode
1953  *  @hw: pointer to the HW structure
1954  *
1955  *  This checks if the adapter has any manageability enabled.
1956  *  This is a function pointer entry point only called by read/write
1957  *  routines for the PHY and NVM parts.
1958  **/
1959 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1960 {
1961 	u32 fwsm;
1962 
1963 	DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1964 
1965 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
1966 
1967 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1968 	       ((fwsm & E1000_FWSM_MODE_MASK) ==
1969 		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1970 }
1971 
1972 /**
1973  *  e1000_check_mng_mode_pchlan - Checks management mode
1974  *  @hw: pointer to the HW structure
1975  *
1976  *  This checks if the adapter has iAMT enabled.
1977  *  This is a function pointer entry point only called by read/write
1978  *  routines for the PHY and NVM parts.
1979  **/
1980 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1981 {
1982 	u32 fwsm;
1983 
1984 	DEBUGFUNC("e1000_check_mng_mode_pchlan");
1985 
1986 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
1987 
1988 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1989 	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1990 }
1991 
1992 /**
1993  *  e1000_rar_set_pch2lan - Set receive address register
1994  *  @hw: pointer to the HW structure
1995  *  @addr: pointer to the receive address
1996  *  @index: receive address array register
1997  *
1998  *  Sets the receive address array register at index to the address passed
1999  *  in by addr.  For 82579, RAR[0] is the base address register that is to
2000  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
2001  *  Use SHRA[0-3] in place of those reserved for ME.
2002  **/
2003 static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
2004 {
2005 	u32 rar_low, rar_high;
2006 
2007 	DEBUGFUNC("e1000_rar_set_pch2lan");
2008 
2009 	/* HW expects these in little endian so we reverse the byte order
2010 	 * from network order (big endian) to little endian
2011 	 */
2012 	rar_low = ((u32) addr[0] |
2013 		   ((u32) addr[1] << 8) |
2014 		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2015 
2016 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2017 
2018 	/* If the MAC address is zero, no need to set the AV bit */
2019 	if (rar_low || rar_high)
2020 		rar_high |= E1000_RAH_AV;
2021 
2022 	if (index == 0) {
2023 		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2024 		E1000_WRITE_FLUSH(hw);
2025 		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2026 		E1000_WRITE_FLUSH(hw);
2027 		return E1000_SUCCESS;
2028 	}
2029 
2030 	/* RAR[1-6] are owned by manageability.  Skip those and program the
2031 	 * next address into the SHRA register array.
2032 	 */
2033 	if (index < (u32) (hw->mac.rar_entry_count)) {
2034 		s32 ret_val;
2035 
2036 		ret_val = e1000_acquire_swflag_ich8lan(hw);
2037 		if (ret_val)
2038 			goto out;
2039 
2040 		E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
2041 		E1000_WRITE_FLUSH(hw);
2042 		E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
2043 		E1000_WRITE_FLUSH(hw);
2044 
2045 		e1000_release_swflag_ich8lan(hw);
2046 
2047 		/* verify the register updates */
2048 		if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
2049 		    (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
2050 			return E1000_SUCCESS;
2051 
2052 		DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
2053 			 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
2054 	}
2055 
2056 out:
2057 	DEBUGOUT1("Failed to write receive address at index %d\n", index);
2058 	return -E1000_ERR_CONFIG;
2059 }
2060 
2061 /**
2062  *  e1000_rar_set_pch_lpt - Set receive address registers
2063  *  @hw: pointer to the HW structure
2064  *  @addr: pointer to the receive address
2065  *  @index: receive address array register
2066  *
2067  *  Sets the receive address register array at index to the address passed
2068  *  in by addr. For LPT, RAR[0] is the base address register that is to
2069  *  contain the MAC address. SHRA[0-10] are the shared receive address
2070  *  registers that are shared between the Host and manageability engine (ME).
2071  **/
2072 static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
2073 {
2074 	u32 rar_low, rar_high;
2075 	u32 wlock_mac;
2076 
2077 	DEBUGFUNC("e1000_rar_set_pch_lpt");
2078 
2079 	/* HW expects these in little endian so we reverse the byte order
2080 	 * from network order (big endian) to little endian
2081 	 */
2082 	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
2083 		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2084 
2085 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2086 
2087 	/* If the MAC address is zero, no need to set the AV bit */
2088 	if (rar_low || rar_high)
2089 		rar_high |= E1000_RAH_AV;
2090 
2091 	if (index == 0) {
2092 		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2093 		E1000_WRITE_FLUSH(hw);
2094 		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2095 		E1000_WRITE_FLUSH(hw);
2096 		return E1000_SUCCESS;
2097 	}
2098 
2099 	/* The manageability engine (ME) can lock certain SHRAR registers that
2100 	 * it is using - those registers are unavailable for use.
2101 	 */
2102 	if (index < hw->mac.rar_entry_count) {
2103 		wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
2104 			    E1000_FWSM_WLOCK_MAC_MASK;
2105 		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
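		/* wlock_mac encoding: 0 means no SHRAR registers are
		 * locked, 1 means all of them are locked, and any other
		 * value N leaves only SHRA[0..N-1] writable by the host
		 * (hence the index <= wlock_mac test below).
		 */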
2106 
2107 		/* Check if all SHRAR registers are locked */
2108 		if (wlock_mac == 1)
2109 			goto out;
2110 
2111 		if ((wlock_mac == 0) || (index <= wlock_mac)) {
2112 			s32 ret_val;
2113 
2114 			ret_val = e1000_acquire_swflag_ich8lan(hw);
2115 
2116 			if (ret_val)
2117 				goto out;
2118 
2119 			E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
2120 					rar_low);
2121 			E1000_WRITE_FLUSH(hw);
2122 			E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
2123 					rar_high);
2124 			E1000_WRITE_FLUSH(hw);
2125 
2126 			e1000_release_swflag_ich8lan(hw);
2127 
2128 			/* verify the register updates */
2129 			if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
2130 			    (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
2131 				return E1000_SUCCESS;
2132 		}
2133 	}
2134 
2135 out:
2136 	DEBUGOUT1("Failed to write receive address at index %d\n", index);
2137 	return -E1000_ERR_CONFIG;
2138 }
2139 
2140 /**
2141  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
2142  *  @hw: pointer to the HW structure
2143  *  @mc_addr_list: array of multicast addresses to program
2144  *  @mc_addr_count: number of multicast addresses to program
2145  *
2146  *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
2147  *  The caller must have a packed mc_addr_list of multicast addresses.
2148  **/
2149 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
2150 					      u8 *mc_addr_list,
2151 					      u32 mc_addr_count)
2152 {
2153 	u16 phy_reg = 0;
2154 	int i;
2155 	s32 ret_val;
2156 
2157 	DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
2158 
2159 	e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
2160 
2161 	ret_val = hw->phy.ops.acquire(hw);
2162 	if (ret_val)
2163 		return;
2164 
2165 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2166 	if (ret_val)
2167 		goto release;
2168 
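	/* Each 32-bit MTA shadow entry is written as two 16-bit PHY
	 * wakeup registers: BM_MTA(i) takes the low word and
	 * BM_MTA(i) + 1 the high word.
	 */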
2169 	for (i = 0; i < hw->mac.mta_reg_count; i++) {
2170 		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
2171 					   (u16)(hw->mac.mta_shadow[i] &
2172 						 0xFFFF));
2173 		hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
2174 					   (u16)((hw->mac.mta_shadow[i] >> 16) &
2175 						 0xFFFF));
2176 	}
2177 
2178 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2179 
2180 release:
2181 	hw->phy.ops.release(hw);
2182 }
2183 
2184 /**
2185  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
2186  *  @hw: pointer to the HW structure
2187  *
2188  *  Checks if firmware is blocking the reset of the PHY.
2189  *  This is a function pointer entry point only called by
2190  *  reset routines.
2191  **/
2192 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2193 {
2194 	u32 fwsm;
2195 	bool blocked = FALSE;
2196 	int i = 0;
2197 
2198 	DEBUGFUNC("e1000_check_reset_block_ich8lan");
2199 
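	/* Poll FWSM for up to ~300 ms (30 iterations of 10 ms) waiting
	 * for firmware to deassert the PHY reset block.
	 */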
2200 	do {
2201 		fwsm = E1000_READ_REG(hw, E1000_FWSM);
2202 		if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
2203 			blocked = TRUE;
2204 			msec_delay(10);
2205 			continue;
2206 		}
2207 		blocked = FALSE;
2208 	} while (blocked && (i++ < 30));
2209 	return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
2210 }
2211 
2212 /**
2213  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2214  *  @hw: pointer to the HW structure
2215  *
2216  *  Assumes semaphore already acquired.
2217  *
2218  **/
2219 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2220 {
2221 	u16 phy_data;
2222 	u32 strap = E1000_READ_REG(hw, E1000_STRAP);
2223 	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2224 		E1000_STRAP_SMT_FREQ_SHIFT;
2225 	s32 ret_val;
2226 
2227 	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2228 
2229 	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2230 	if (ret_val)
2231 		return ret_val;
2232 
2233 	phy_data &= ~HV_SMB_ADDR_MASK;
2234 	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2235 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2236 
2237 	if (hw->phy.type == e1000_phy_i217) {
2238 		/* Restore SMBus frequency */
2239 		if (freq--) {
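			/* A strap value of 0 means the SMBus frequency was
			 * not strapped; the post-decrement above re-bases a
			 * valid value to 0..2 before bits 0 and 1 are
			 * spread into the PHY's low/high frequency bits.
			 */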
2240 			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2241 			phy_data |= (freq & (1 << 0)) <<
2242 				HV_SMB_ADDR_FREQ_LOW_SHIFT;
2243 			phy_data |= (freq & (1 << 1)) <<
2244 				(HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2245 		} else {
2246 			DEBUGOUT("Unsupported SMB frequency in PHY\n");
2247 		}
2248 	}
2249 
2250 	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2251 }
2252 
2253 /**
2254  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2255  *  @hw:   pointer to the HW structure
2256  *
2257  *  SW should configure the LCD from the NVM extended configuration region
2258  *  as a workaround for certain parts.
2259  **/
2260 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2261 {
2262 	struct e1000_phy_info *phy = &hw->phy;
2263 	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2264 	s32 ret_val = E1000_SUCCESS;
2265 	u16 word_addr, reg_data, reg_addr, phy_page = 0;
2266 
2267 	DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2268 
2269 	/* Initialize the PHY from the NVM on ICH platforms.  This
2270 	 * is needed due to an issue where the NVM configuration is
2271 	 * not properly autoloaded after power transitions.
2272 	 * Therefore, after each PHY reset, we will load the
2273 	 * configuration data out of the NVM manually.
2274 	 */
2275 	switch (hw->mac.type) {
2276 	case e1000_ich8lan:
2277 		if (phy->type != e1000_phy_igp_3)
2278 			return ret_val;
2279 
2280 		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2281 		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2282 			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2283 			break;
2284 		}
2285 		/* Fall-thru */
2286 	case e1000_pchlan:
2287 	case e1000_pch2lan:
2288 	case e1000_pch_lpt:
2289 	case e1000_pch_spt:
2290 	case e1000_pch_cnp:
2291 		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2292 		break;
2293 	default:
2294 		return ret_val;
2295 	}
2296 
2297 	ret_val = hw->phy.ops.acquire(hw);
2298 	if (ret_val)
2299 		return ret_val;
2300 
2301 	data = E1000_READ_REG(hw, E1000_FEXTNVM);
2302 	if (!(data & sw_cfg_mask))
2303 		goto release;
2304 
2305 	/* Make sure HW does not configure LCD from PHY
2306 	 * extended configuration before SW configuration
2307 	 */
2308 	data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2309 	if ((hw->mac.type < e1000_pch2lan) &&
2310 	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2311 		goto release;
2312 
2313 	cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2314 	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2315 	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2316 	if (!cnf_size)
2317 		goto release;
2318 
2319 	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2320 	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2321 
2322 	if (((hw->mac.type == e1000_pchlan) &&
2323 	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2324 	    (hw->mac.type > e1000_pchlan)) {
2325 		/* HW configures the SMBus address and LEDs when the
2326 		 * OEM and LCD Write Enable bits are set in the NVM.
2327 		 * When both NVM bits are cleared, SW will configure
2328 		 * them instead.
2329 		 */
2330 		ret_val = e1000_write_smbus_addr(hw);
2331 		if (ret_val)
2332 			goto release;
2333 
2334 		data = E1000_READ_REG(hw, E1000_LEDCTL);
2335 		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2336 							(u16)data);
2337 		if (ret_val)
2338 			goto release;
2339 	}
2340 
2341 	/* Configure LCD from extended configuration region. */
2342 
2343 	/* cnf_base_addr is in DWORD */
2344 	word_addr = (u16)(cnf_base_addr << 1);
2345 
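	/* Each extended configuration entry is a (data, address) word
	 * pair; page-select entries only update phy_page for the
	 * register writes that follow.
	 */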
2346 	for (i = 0; i < cnf_size; i++) {
2347 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2348 					   &reg_data);
2349 		if (ret_val)
2350 			goto release;
2351 
2352 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2353 					   1, &reg_addr);
2354 		if (ret_val)
2355 			goto release;
2356 
2357 		/* Save off the PHY page for future writes. */
2358 		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2359 			phy_page = reg_data;
2360 			continue;
2361 		}
2362 
2363 		reg_addr &= PHY_REG_MASK;
2364 		reg_addr |= phy_page;
2365 
2366 		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2367 						    reg_data);
2368 		if (ret_val)
2369 			goto release;
2370 	}
2371 
2372 release:
2373 	hw->phy.ops.release(hw);
2374 	return ret_val;
2375 }
2376 
2377 /**
2378  *  e1000_k1_gig_workaround_hv - K1 Si workaround
2379  *  @hw:   pointer to the HW structure
2380  *  @link: link up bool flag
2381  *
2382  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2383  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gbps.
2384  *  If link is down, the function will restore the default K1 setting located
2385  *  in the NVM.
2386  **/
2387 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2388 {
2389 	s32 ret_val = E1000_SUCCESS;
2390 	u16 status_reg = 0;
2391 	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2392 
2393 	DEBUGFUNC("e1000_k1_gig_workaround_hv");
2394 
2395 	if (hw->mac.type != e1000_pchlan)
2396 		return E1000_SUCCESS;
2397 
2398 	/* Wrap the whole flow with the sw flag */
2399 	ret_val = hw->phy.ops.acquire(hw);
2400 	if (ret_val)
2401 		return ret_val;
2402 
2403 	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2404 	if (link) {
2405 		if (hw->phy.type == e1000_phy_82578) {
2406 			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2407 							      &status_reg);
2408 			if (ret_val)
2409 				goto release;
2410 
2411 			status_reg &= (BM_CS_STATUS_LINK_UP |
2412 				       BM_CS_STATUS_RESOLVED |
2413 				       BM_CS_STATUS_SPEED_MASK);
2414 
2415 			if (status_reg == (BM_CS_STATUS_LINK_UP |
2416 					   BM_CS_STATUS_RESOLVED |
2417 					   BM_CS_STATUS_SPEED_1000))
2418 				k1_enable = FALSE;
2419 		}
2420 
2421 		if (hw->phy.type == e1000_phy_82577) {
2422 			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2423 							      &status_reg);
2424 			if (ret_val)
2425 				goto release;
2426 
2427 			status_reg &= (HV_M_STATUS_LINK_UP |
2428 				       HV_M_STATUS_AUTONEG_COMPLETE |
2429 				       HV_M_STATUS_SPEED_MASK);
2430 
2431 			if (status_reg == (HV_M_STATUS_LINK_UP |
2432 					   HV_M_STATUS_AUTONEG_COMPLETE |
2433 					   HV_M_STATUS_SPEED_1000))
2434 				k1_enable = FALSE;
2435 		}
2436 
2437 		/* Link stall fix for link up */
2438 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2439 						       0x0100);
2440 		if (ret_val)
2441 			goto release;
2442 
2443 	} else {
2444 		/* Link stall fix for link down */
2445 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2446 						       0x4100);
2447 		if (ret_val)
2448 			goto release;
2449 	}
2450 
2451 	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2452 
2453 release:
2454 	hw->phy.ops.release(hw);
2455 
2456 	return ret_val;
2457 }
2458 
2459 /**
2460  *  e1000_configure_k1_ich8lan - Configure K1 power state
2461  *  @hw: pointer to the HW structure
2462  *  @k1_enable: K1 state to configure
2463  *
2464  *  Configure the K1 power state based on the provided parameter.
2465  *  Assumes semaphore already acquired.
2466  *
2467  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2468  **/
2469 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2470 {
2471 	s32 ret_val;
2472 	u32 ctrl_reg = 0;
2473 	u32 ctrl_ext = 0;
2474 	u32 reg = 0;
2475 	u16 kmrn_reg = 0;
2476 
2477 	DEBUGFUNC("e1000_configure_k1_ich8lan");
2478 
2479 	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2480 					     &kmrn_reg);
2481 	if (ret_val)
2482 		return ret_val;
2483 
2484 	if (k1_enable)
2485 		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2486 	else
2487 		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2488 
2489 	ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2490 					      kmrn_reg);
2491 	if (ret_val)
2492 		return ret_val;
2493 
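	/* The transient forced-speed window below (speed bits cleared,
	 * FRCSPD and SPD_BYPS set, then CTRL/CTRL_EXT restored) appears
	 * to be required for the new K1 setting to take effect.
	 */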
2494 	usec_delay(20);
2495 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2496 	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2497 
2498 	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2499 	reg |= E1000_CTRL_FRCSPD;
2500 	E1000_WRITE_REG(hw, E1000_CTRL, reg);
2501 
2502 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2503 	E1000_WRITE_FLUSH(hw);
2504 	usec_delay(20);
2505 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2506 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2507 	E1000_WRITE_FLUSH(hw);
2508 	usec_delay(20);
2509 
2510 	return E1000_SUCCESS;
2511 }
2512 
2513 /**
2514  *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2515  *  @hw:       pointer to the HW structure
2516  *  @d0_state: boolean if entering d0 or d3 device state
2517  *
2518  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2519  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2520  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
2521  **/
2522 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2523 {
2524 	s32 ret_val = 0;
2525 	u32 mac_reg;
2526 	u16 oem_reg;
2527 
2528 	DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2529 
2530 	if (hw->mac.type < e1000_pchlan)
2531 		return ret_val;
2532 
2533 	ret_val = hw->phy.ops.acquire(hw);
2534 	if (ret_val)
2535 		return ret_val;
2536 
2537 	if (hw->mac.type == e1000_pchlan) {
2538 		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2539 		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2540 			goto release;
2541 	}
2542 
2543 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2544 	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2545 		goto release;
2546 
2547 	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2548 
2549 	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2550 	if (ret_val)
2551 		goto release;
2552 
2553 	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2554 
2555 	if (d0_state) {
2556 		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2557 			oem_reg |= HV_OEM_BITS_GBE_DIS;
2558 
2559 		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2560 			oem_reg |= HV_OEM_BITS_LPLU;
2561 	} else {
2562 		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2563 		    E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2564 			oem_reg |= HV_OEM_BITS_GBE_DIS;
2565 
2566 		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2567 		    E1000_PHY_CTRL_NOND0A_LPLU))
2568 			oem_reg |= HV_OEM_BITS_LPLU;
2569 	}
2570 
2571 	/* Set Restart auto-neg to activate the bits */
2572 	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2573 	    !hw->phy.ops.check_reset_block(hw))
2574 		oem_reg |= HV_OEM_BITS_RESTART_AN;
2575 
2576 	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2577 
2578 release:
2579 	hw->phy.ops.release(hw);
2580 
2581 	return ret_val;
2582 }
2583 
2585 /**
2586  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2587  *  @hw:   pointer to the HW structure
2588  **/
2589 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2590 {
2591 	s32 ret_val;
2592 	u16 data;
2593 
2594 	DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2595 
2596 	ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2597 	if (ret_val)
2598 		return ret_val;
2599 
2600 	data |= HV_KMRN_MDIO_SLOW;
2601 
2602 	ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2603 
2604 	return ret_val;
2605 }
2606 
2607 /**
2608  *  e1000_hv_phy_workarounds_ich8lan - A series of PHY workarounds to be
2609  *  done after every PHY reset.
2610  *  @hw: pointer to the HW structure
2611  **/
2612 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2613 {
2614 	s32 ret_val = E1000_SUCCESS;
2615 	u16 phy_data;
2616 
2617 	DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2618 
2619 	if (hw->mac.type != e1000_pchlan)
2620 		return E1000_SUCCESS;
2621 
2622 	/* Set MDIO slow mode before any other MDIO access */
2623 	if (hw->phy.type == e1000_phy_82577) {
2624 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
2625 		if (ret_val)
2626 			return ret_val;
2627 	}
2628 
2629 	if (((hw->phy.type == e1000_phy_82577) &&
2630 	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2631 	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2632 		/* Disable generation of early preamble */
2633 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2634 		if (ret_val)
2635 			return ret_val;
2636 
2637 		/* Preamble tuning for SSC */
2638 		ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2639 						0xA204);
2640 		if (ret_val)
2641 			return ret_val;
2642 	}
2643 
2644 	if (hw->phy.type == e1000_phy_82578) {
2645 		/* Return registers to default by doing a soft reset then
2646 		 * writing 0x3140 to the control register.
2647 		 */
2648 		if (hw->phy.revision < 2) {
2649 			e1000_phy_sw_reset_generic(hw);
2650 			ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2651 							0x3140);
2652 		}
2653 	}
2654 
2655 	/* Select page 0 */
2656 	ret_val = hw->phy.ops.acquire(hw);
2657 	if (ret_val)
2658 		return ret_val;
2659 
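	/* Address the PHY directly at MDIO address 1 so the raw MDIC
	 * page-select write below lands on the PCH PHY.
	 */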
2660 	hw->phy.addr = 1;
2661 	ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2662 	hw->phy.ops.release(hw);
2663 	if (ret_val)
2664 		return ret_val;
2665 
2666 	/* Configure the K1 Si workaround during phy reset assuming there is
2667 	 * link so that it disables K1 if link is in 1Gbps.
2668 	 */
2669 	ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
2670 	if (ret_val)
2671 		return ret_val;
2672 
2673 	/* Workaround for link disconnects on a busy hub in half duplex */
2674 	ret_val = hw->phy.ops.acquire(hw);
2675 	if (ret_val)
2676 		return ret_val;
2677 	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2678 	if (ret_val)
2679 		goto release;
2680 	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2681 					       phy_data & 0x00FF);
2682 	if (ret_val)
2683 		goto release;
2684 
2685 	/* set MSE higher to enable link to stay up when noise is high */
2686 	ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2687 release:
2688 	hw->phy.ops.release(hw);
2689 
2690 	return ret_val;
2691 }
2692 
2693 /**
2694  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2695  *  @hw:   pointer to the HW structure
2696  **/
2697 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2698 {
2699 	u32 mac_reg;
2700 	u16 i, phy_reg = 0;
2701 	s32 ret_val;
2702 
2703 	DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2704 
2705 	ret_val = hw->phy.ops.acquire(hw);
2706 	if (ret_val)
2707 		return;
2708 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2709 	if (ret_val)
2710 		goto release;
2711 
2712 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2713 	for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2714 		mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2715 		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2716 					   (u16)(mac_reg & 0xFFFF));
2717 		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2718 					   (u16)((mac_reg >> 16) & 0xFFFF));
2719 
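		/* Only the Address Valid bit of RAH is propagated to the
		 * PHY's RAR control word (bit 31 shifted down to bit 15).
		 */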
2720 		mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2721 		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2722 					   (u16)(mac_reg & 0xFFFF));
2723 		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2724 					   (u16)((mac_reg & E1000_RAH_AV)
2725 						 >> 16));
2726 	}
2727 
2728 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2729 
2730 release:
2731 	hw->phy.ops.release(hw);
2732 }
2733 
2734 static u32 e1000_calc_rx_da_crc(u8 mac[])
2735 {
2736 	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
2737 	u32 i, j, mask, crc;
2738 
2739 	DEBUGFUNC("e1000_calc_rx_da_crc");
2740 
2741 	crc = 0xffffffff;
2742 	for (i = 0; i < 6; i++) {
2743 		crc = crc ^ mac[i];
2744 		for (j = 8; j > 0; j--) {
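			/* (crc & 1) * (-1) is an all-ones mask when the
			 * low bit is set, so the polynomial is XORed in
			 * only when the shifted-out bit was 1 (reflected
			 * CRC-32).
			 */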
2745 			mask = (crc & 1) * (-1);
2746 			crc = (crc >> 1) ^ (poly & mask);
2747 		}
2748 	}
2749 	return ~crc;
2750 }
2751 
2752 /**
2753  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2754  *  with 82579 PHY
2755  *  @hw: pointer to the HW structure
2756  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2757  **/
2758 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2759 {
2760 	s32 ret_val = E1000_SUCCESS;
2761 	u16 phy_reg, data;
2762 	u32 mac_reg;
2763 	u16 i;
2764 
2765 	DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2766 
2767 	if (hw->mac.type < e1000_pch2lan)
2768 		return E1000_SUCCESS;
2769 
2770 	/* disable Rx path while enabling/disabling workaround */
2771 	hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2772 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2773 					phy_reg | (1 << 14));
2774 	if (ret_val)
2775 		return ret_val;
2776 
2777 	if (enable) {
2778 		/* Write Rx addresses (rar_entry_count for RAL/H, and
2779 		 * SHRAL/H) and initial CRC values to the MAC
2780 		 */
2781 		for (i = 0; i < hw->mac.rar_entry_count; i++) {
2782 			u8 mac_addr[ETH_ADDR_LEN] = {0};
2783 			u32 addr_high, addr_low;
2784 
2785 			addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2786 			if (!(addr_high & E1000_RAH_AV))
2787 				continue;
2788 			addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2789 			mac_addr[0] = (addr_low & 0xFF);
2790 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
2791 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
2792 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
2793 			mac_addr[4] = (addr_high & 0xFF);
2794 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
2795 
2796 			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2797 					e1000_calc_rx_da_crc(mac_addr));
2798 		}
2799 
2800 		/* Write Rx addresses to the PHY */
2801 		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2802 
2803 		/* Enable jumbo frame workaround in the MAC */
2804 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2805 		mac_reg &= ~(1 << 14);
2806 		mac_reg |= (7 << 15);
2807 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2808 
2809 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2810 		mac_reg |= E1000_RCTL_SECRC;
2811 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2812 
2813 		ret_val = e1000_read_kmrn_reg_generic(hw,
2814 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2815 						&data);
2816 		if (ret_val)
2817 			return ret_val;
2818 		ret_val = e1000_write_kmrn_reg_generic(hw,
2819 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2820 						data | (1 << 0));
2821 		if (ret_val)
2822 			return ret_val;
2823 		ret_val = e1000_read_kmrn_reg_generic(hw,
2824 						E1000_KMRNCTRLSTA_HD_CTRL,
2825 						&data);
2826 		if (ret_val)
2827 			return ret_val;
2828 		data &= ~(0xF << 8);
2829 		data |= (0xB << 8);
2830 		ret_val = e1000_write_kmrn_reg_generic(hw,
2831 						E1000_KMRNCTRLSTA_HD_CTRL,
2832 						data);
2833 		if (ret_val)
2834 			return ret_val;
2835 
2836 		/* Enable jumbo frame workaround in the PHY */
2837 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2838 		data &= ~(0x7F << 5);
2839 		data |= (0x37 << 5);
2840 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2841 		if (ret_val)
2842 			return ret_val;
2843 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2844 		data &= ~(1 << 13);
2845 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2846 		if (ret_val)
2847 			return ret_val;
2848 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2849 		data &= ~(0x3FF << 2);
2850 		data |= (E1000_TX_PTR_GAP << 2);
2851 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2852 		if (ret_val)
2853 			return ret_val;
2854 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2855 		if (ret_val)
2856 			return ret_val;
2857 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2858 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2859 						(1 << 10));
2860 		if (ret_val)
2861 			return ret_val;
2862 	} else {
2863 		/* Write MAC register values back to h/w defaults */
2864 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2865 		mac_reg &= ~(0xF << 14);
2866 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2867 
2868 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2869 		mac_reg &= ~E1000_RCTL_SECRC;
2870 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2871 
2872 		ret_val = e1000_read_kmrn_reg_generic(hw,
2873 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2874 						&data);
2875 		if (ret_val)
2876 			return ret_val;
2877 		ret_val = e1000_write_kmrn_reg_generic(hw,
2878 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2879 						data & ~(1 << 0));
2880 		if (ret_val)
2881 			return ret_val;
2882 		ret_val = e1000_read_kmrn_reg_generic(hw,
2883 						E1000_KMRNCTRLSTA_HD_CTRL,
2884 						&data);
2885 		if (ret_val)
2886 			return ret_val;
2887 		data &= ~(0xF << 8);
2888 		data |= (0xB << 8);
2889 		ret_val = e1000_write_kmrn_reg_generic(hw,
2890 						E1000_KMRNCTRLSTA_HD_CTRL,
2891 						data);
2892 		if (ret_val)
2893 			return ret_val;
2894 
2895 		/* Write PHY register values back to h/w defaults */
2896 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2897 		data &= ~(0x7F << 5);
2898 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2899 		if (ret_val)
2900 			return ret_val;
2901 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2902 		data |= (1 << 13);
2903 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2904 		if (ret_val)
2905 			return ret_val;
2906 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2907 		data &= ~(0x3FF << 2);
2908 		data |= (0x8 << 2);
2909 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2910 		if (ret_val)
2911 			return ret_val;
2912 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2913 		if (ret_val)
2914 			return ret_val;
2915 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2916 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2917 						~(1 << 10));
2918 		if (ret_val)
2919 			return ret_val;
2920 	}
2921 
2922 	/* re-enable Rx path after enabling/disabling workaround */
2923 	return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2924 				     ~(1 << 14));
2925 }
2926 
2927 /**
2928  *  e1000_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
2929  *  done after every PHY reset.
2930  *  @hw: pointer to the HW structure
2931  **/
2932 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2933 {
2934 	s32 ret_val = E1000_SUCCESS;
2935 
2936 	DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2937 
2938 	if (hw->mac.type != e1000_pch2lan)
2939 		return E1000_SUCCESS;
2940 
2941 	/* Set MDIO slow mode before any other MDIO access */
2942 	ret_val = e1000_set_mdio_slow_mode_hv(hw);
2943 	if (ret_val)
2944 		return ret_val;
2945 
2946 	ret_val = hw->phy.ops.acquire(hw);
2947 	if (ret_val)
2948 		return ret_val;
2949 	/* set MSE higher to enable link to stay up when noise is high */
2950 	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2951 	if (ret_val)
2952 		goto release;
2953 	/* drop the link after the MSE threshold has been exceeded 5 times */
2954 	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2955 release:
2956 	hw->phy.ops.release(hw);
2957 
2958 	return ret_val;
2959 }
2960 
2961 /**
2962  *  e1000_k1_workaround_lv - K1 Si workaround
2963  *  @hw:   pointer to the HW structure
2964  *
2965  *  Workaround to set the K1 beacon duration for 82579 parts at 10Mbps.
2966  *  Disables K1 for 1000 and 100 speeds.
2967  **/
2968 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2969 {
2970 	s32 ret_val = E1000_SUCCESS;
2971 	u16 status_reg = 0;
2972 
2973 	DEBUGFUNC("e1000_k1_workaround_lv");
2974 
2975 	if (hw->mac.type != e1000_pch2lan)
2976 		return E1000_SUCCESS;
2977 
2978 	/* Set K1 beacon duration based on 10Mbps speed */
2979 	ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2980 	if (ret_val)
2981 		return ret_val;
2982 
2983 	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2984 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2985 		if (status_reg &
2986 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2987 			u16 pm_phy_reg;
2988 
2989 			/* LV 1G/100 packet drop issue workaround */
2990 			ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2991 						       &pm_phy_reg);
2992 			if (ret_val)
2993 				return ret_val;
2994 			pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2995 			ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
2996 							pm_phy_reg);
2997 			if (ret_val)
2998 				return ret_val;
2999 		} else {
3000 			u32 mac_reg;
3001 			mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
3002 			mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
3003 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
3004 			E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
3005 		}
3006 	}
3007 
3008 	return ret_val;
3009 }
3010 
3011 /**
3012  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
3013  *  @hw:   pointer to the HW structure
3014  *  @gate: boolean set to TRUE to gate, FALSE to ungate
3015  *
3016  *  Gate/ungate the automatic PHY configuration via hardware; perform
3017  *  the configuration via software instead.
3018  **/
3019 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
3020 {
3021 	u32 extcnf_ctrl;
3022 
3023 	DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
3024 
3025 	if (hw->mac.type < e1000_pch2lan)
3026 		return;
3027 
3028 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
3029 
3030 	if (gate)
3031 		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3032 	else
3033 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3034 
3035 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
3036 }
3037 
3038 /**
3039  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
3040  *  @hw: pointer to the HW structure
3041  *
3042  *  Check the appropriate indication that the MAC has finished configuring the
3043  *  PHY after a software reset.
3044  **/
3045 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
3046 {
3047 	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
3048 
3049 	DEBUGFUNC("e1000_lan_init_done_ich8lan");
3050 
3051 	/* Wait for basic configuration to complete before proceeding */
3052 	do {
3053 		data = E1000_READ_REG(hw, E1000_STATUS);
3054 		data &= E1000_STATUS_LAN_INIT_DONE;
3055 		usec_delay(100);
3056 	} while ((!data) && --loop);
3057 
3058 	/* If basic configuration is incomplete before the above loop
3059 	 * count reaches 0, loading the configuration from NVM will
3060 	 * leave the PHY in a bad state possibly resulting in no link.
3061 	 */
3062 	if (loop == 0)
3063 		DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
3064 
3065 	/* Clear the Init Done bit for the next init event */
3066 	data = E1000_READ_REG(hw, E1000_STATUS);
3067 	data &= ~E1000_STATUS_LAN_INIT_DONE;
3068 	E1000_WRITE_REG(hw, E1000_STATUS, data);
3069 }
3070 
3071 /**
3072  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
3073  *  @hw: pointer to the HW structure
3074  **/
3075 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
3076 {
3077 	s32 ret_val = E1000_SUCCESS;
3078 	u16 reg;
3079 
3080 	DEBUGFUNC("e1000_post_phy_reset_ich8lan");
3081 
3082 	if (hw->phy.ops.check_reset_block(hw))
3083 		return E1000_SUCCESS;
3084 
3085 	/* Allow time for h/w to get to quiescent state after reset */
3086 	msec_delay(10);
3087 
3088 	/* Perform any necessary post-reset workarounds */
3089 	switch (hw->mac.type) {
3090 	case e1000_pchlan:
3091 		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
3092 		if (ret_val)
3093 			return ret_val;
3094 		break;
3095 	case e1000_pch2lan:
3096 		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
3097 		if (ret_val)
3098 			return ret_val;
3099 		break;
3100 	default:
3101 		break;
3102 	}
3103 
3104 	/* Clear the host wakeup bit after lcd reset */
3105 	if (hw->mac.type >= e1000_pchlan) {
3106 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
3107 		reg &= ~BM_WUC_HOST_WU_BIT;
3108 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
3109 	}
3110 
3111 	/* Configure the LCD with the extended configuration region in NVM */
3112 	ret_val = e1000_sw_lcd_config_ich8lan(hw);
3113 	if (ret_val)
3114 		return ret_val;
3115 
3116 	/* Configure the LCD with the OEM bits in NVM */
3117 	ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
3118 
3119 	if (hw->mac.type == e1000_pch2lan) {
3120 		/* Ungate automatic PHY configuration on non-managed 82579 */
3121 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
3122 		    E1000_ICH_FWSM_FW_VALID)) {
3123 			msec_delay(10);
3124 			e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
3125 		}
3126 
3127 		/* Set EEE LPI Update Timer to 200usec */
3128 		ret_val = hw->phy.ops.acquire(hw);
3129 		if (ret_val)
3130 			return ret_val;
3131 		ret_val = e1000_write_emi_reg_locked(hw,
3132 						     I82579_LPI_UPDATE_TIMER,
3133 						     0x1387);
3134 		hw->phy.ops.release(hw);
3135 	}
3136 
3137 	return ret_val;
3138 }
3139 
3140 /**
3141  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
3142  *  @hw: pointer to the HW structure
3143  *
3144  *  Resets the PHY
3145  *  This is a function pointer entry point called by drivers
3146  *  or other shared routines.
3147  **/
3148 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
3149 {
3150 	s32 ret_val = E1000_SUCCESS;
3151 
3152 	DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
3153 
3154 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
3155 	if ((hw->mac.type == e1000_pch2lan) &&
3156 	    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3157 		e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3158 
3159 	ret_val = e1000_phy_hw_reset_generic(hw);
3160 	if (ret_val)
3161 		return ret_val;
3162 
3163 	return e1000_post_phy_reset_ich8lan(hw);
3164 }
3165 
3166 /**
3167  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
3168  *  @hw: pointer to the HW structure
3169  *  @active: TRUE to enable LPLU, FALSE to disable
3170  *
3171  *  Sets the LPLU state according to the active flag.  For PCH, if the OEM
3172  *  write bit is disabled in the NVM, writing the LPLU bits in the MAC will
3173  *  not set the PHY speed. This function will manually set the LPLU bit and restart
3174  *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
3175  *  since it configures the same bit.
3176  **/
3177 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
3178 {
3179 	s32 ret_val;
3180 	u16 oem_reg;
3181 
3182 	DEBUGFUNC("e1000_set_lplu_state_pchlan");
3183 	ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
3184 	if (ret_val)
3185 		return ret_val;
3186 
3187 	if (active)
3188 		oem_reg |= HV_OEM_BITS_LPLU;
3189 	else
3190 		oem_reg &= ~HV_OEM_BITS_LPLU;
3191 
3192 	if (!hw->phy.ops.check_reset_block(hw))
3193 		oem_reg |= HV_OEM_BITS_RESTART_AN;
3194 
3195 	return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
3196 }
3197 
3198 /**
3199  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
3200  *  @hw: pointer to the HW structure
3201  *  @active: TRUE to enable LPLU, FALSE to disable
3202  *
3203  *  Sets the LPLU D0 state according to the active flag.  When
3204  *  activating LPLU this function also disables smart speed
3205  *  and vice versa.  LPLU will not be activated unless the
3206  *  device autonegotiation advertisement meets standards of
3207  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3208  *  This is a function pointer entry point only called by
3209  *  PHY setup routines.
3210  **/
3211 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3212 {
3213 	struct e1000_phy_info *phy = &hw->phy;
3214 	u32 phy_ctrl;
3215 	s32 ret_val = E1000_SUCCESS;
3216 	u16 data;
3217 
3218 	DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3219 
3220 	if (phy->type == e1000_phy_ife)
3221 		return E1000_SUCCESS;
3222 
3223 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3224 
3225 	if (active) {
3226 		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3227 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3228 
3229 		if (phy->type != e1000_phy_igp_3)
3230 			return E1000_SUCCESS;
3231 
3232 		/* Call gig speed drop workaround on LPLU before accessing
3233 		 * any PHY registers
3234 		 */
3235 		if (hw->mac.type == e1000_ich8lan)
3236 			e1000_gig_downshift_workaround_ich8lan(hw);
3237 
3238 		/* When LPLU is enabled, we should disable SmartSpeed */
3239 		ret_val = phy->ops.read_reg(hw,
3240 					    IGP01E1000_PHY_PORT_CONFIG,
3241 					    &data);
3242 		if (ret_val)
3243 			return ret_val;
3244 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3245 		ret_val = phy->ops.write_reg(hw,
3246 					     IGP01E1000_PHY_PORT_CONFIG,
3247 					     data);
3248 		if (ret_val)
3249 			return ret_val;
3250 	} else {
3251 		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3252 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3253 
3254 		if (phy->type != e1000_phy_igp_3)
3255 			return E1000_SUCCESS;
3256 
3257 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3258 		 * during Dx states where the power conservation is most
3259 		 * important.  During driver activity we should enable
3260 		 * SmartSpeed, so performance is maintained.
3261 		 */
3262 		if (phy->smart_speed == e1000_smart_speed_on) {
3263 			ret_val = phy->ops.read_reg(hw,
3264 						    IGP01E1000_PHY_PORT_CONFIG,
3265 						    &data);
3266 			if (ret_val)
3267 				return ret_val;
3268 
3269 			data |= IGP01E1000_PSCFR_SMART_SPEED;
3270 			ret_val = phy->ops.write_reg(hw,
3271 						     IGP01E1000_PHY_PORT_CONFIG,
3272 						     data);
3273 			if (ret_val)
3274 				return ret_val;
3275 		} else if (phy->smart_speed == e1000_smart_speed_off) {
3276 			ret_val = phy->ops.read_reg(hw,
3277 						    IGP01E1000_PHY_PORT_CONFIG,
3278 						    &data);
3279 			if (ret_val)
3280 				return ret_val;
3281 
3282 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3283 			ret_val = phy->ops.write_reg(hw,
3284 						     IGP01E1000_PHY_PORT_CONFIG,
3285 						     data);
3286 			if (ret_val)
3287 				return ret_val;
3288 		}
3289 	}
3290 
3291 	return E1000_SUCCESS;
3292 }
3293 
3294 /**
3295  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3296  *  @hw: pointer to the HW structure
3297  *  @active: TRUE to enable LPLU, FALSE to disable
3298  *
3299  *  Sets the LPLU D3 state according to the active flag.  When
3300  *  activating LPLU this function also disables smart speed
3301  *  and vice versa.  LPLU will not be activated unless the
3302  *  device autonegotiation advertisement meets standards of
3303  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3304  *  This is a function pointer entry point only called by
3305  *  PHY setup routines.
3306  **/
3307 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3308 {
3309 	struct e1000_phy_info *phy = &hw->phy;
3310 	u32 phy_ctrl;
3311 	s32 ret_val = E1000_SUCCESS;
3312 	u16 data;
3313 
3314 	DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3315 
3316 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3317 
3318 	if (!active) {
3319 		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3320 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3321 
3322 		if (phy->type != e1000_phy_igp_3)
3323 			return E1000_SUCCESS;
3324 
3325 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3326 		 * during Dx states where the power conservation is most
3327 		 * important.  During driver activity we should enable
3328 		 * SmartSpeed, so performance is maintained.
3329 		 */
3330 		if (phy->smart_speed == e1000_smart_speed_on) {
3331 			ret_val = phy->ops.read_reg(hw,
3332 						    IGP01E1000_PHY_PORT_CONFIG,
3333 						    &data);
3334 			if (ret_val)
3335 				return ret_val;
3336 
3337 			data |= IGP01E1000_PSCFR_SMART_SPEED;
3338 			ret_val = phy->ops.write_reg(hw,
3339 						     IGP01E1000_PHY_PORT_CONFIG,
3340 						     data);
3341 			if (ret_val)
3342 				return ret_val;
3343 		} else if (phy->smart_speed == e1000_smart_speed_off) {
3344 			ret_val = phy->ops.read_reg(hw,
3345 						    IGP01E1000_PHY_PORT_CONFIG,
3346 						    &data);
3347 			if (ret_val)
3348 				return ret_val;
3349 
3350 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3351 			ret_val = phy->ops.write_reg(hw,
3352 						     IGP01E1000_PHY_PORT_CONFIG,
3353 						     data);
3354 			if (ret_val)
3355 				return ret_val;
3356 		}
3357 	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3358 		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3359 		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3360 		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3361 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3362 
3363 		if (phy->type != e1000_phy_igp_3)
3364 			return E1000_SUCCESS;
3365 
3366 		/* Call gig speed drop workaround on LPLU before accessing
3367 		 * any PHY registers
3368 		 */
3369 		if (hw->mac.type == e1000_ich8lan)
3370 			e1000_gig_downshift_workaround_ich8lan(hw);
3371 
3372 		/* When LPLU is enabled, we should disable SmartSpeed */
3373 		ret_val = phy->ops.read_reg(hw,
3374 					    IGP01E1000_PHY_PORT_CONFIG,
3375 					    &data);
3376 		if (ret_val)
3377 			return ret_val;
3378 
3379 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3380 		ret_val = phy->ops.write_reg(hw,
3381 					     IGP01E1000_PHY_PORT_CONFIG,
3382 					     data);
3383 	}
3384 
3385 	return ret_val;
3386 }
3387 
3388 /**
3389  *  e1000_valid_nvm_bank_detect_ich8lan - determine the valid NVM bank (0 or 1)
3390  *  @hw: pointer to the HW structure
3391  *  @bank:  pointer to the variable that returns the active bank
3392  *
3393  *  Reads signature byte from the NVM using the flash access registers.
3394  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3395  **/
3396 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3397 {
3398 	u32 eecd;
3399 	struct e1000_nvm_info *nvm = &hw->nvm;
3400 	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3401 	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3402 	u32 nvm_dword = 0;
3403 	u8 sig_byte = 0;
3404 	s32 ret_val;
3405 
3406 	DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3407 
3408 	switch (hw->mac.type) {
3409 	case e1000_pch_spt:
3410 	case e1000_pch_cnp:
3411 		bank1_offset = nvm->flash_bank_size;
3412 		act_offset = E1000_ICH_NVM_SIG_WORD;
3413 
3414 		/* set bank to 0 in case flash read fails */
3415 		*bank = 0;
3416 
3417 		/* Check bank 0 */
3418 		ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
3419 							 &nvm_dword);
3420 		if (ret_val)
3421 			return ret_val;
3422 		sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3423 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3424 		    E1000_ICH_NVM_SIG_VALUE) {
3425 			*bank = 0;
3426 			return E1000_SUCCESS;
3427 		}
3428 
3429 		/* Check bank 1 */
3430 		ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
3431 							 bank1_offset,
3432 							 &nvm_dword);
3433 		if (ret_val)
3434 			return ret_val;
3435 		sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3436 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3437 		    E1000_ICH_NVM_SIG_VALUE) {
3438 			*bank = 1;
3439 			return E1000_SUCCESS;
3440 		}
3441 
3442 		DEBUGOUT("ERROR: No valid NVM bank present\n");
3443 		return -E1000_ERR_NVM;
3444 	case e1000_ich8lan:
3445 	case e1000_ich9lan:
3446 		eecd = E1000_READ_REG(hw, E1000_EECD);
3447 		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3448 		    E1000_EECD_SEC1VAL_VALID_MASK) {
3449 			if (eecd & E1000_EECD_SEC1VAL)
3450 				*bank = 1;
3451 			else
3452 				*bank = 0;
3453 
3454 			return E1000_SUCCESS;
3455 		}
3456 		DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3457 		/* fall-thru */
3458 	default:
3459 		/* set bank to 0 in case flash read fails */
3460 		*bank = 0;
3461 
3462 		/* Check bank 0 */
3463 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3464 							&sig_byte);
3465 		if (ret_val)
3466 			return ret_val;
3467 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3468 		    E1000_ICH_NVM_SIG_VALUE) {
3469 			*bank = 0;
3470 			return E1000_SUCCESS;
3471 		}
3472 
3473 		/* Check bank 1 */
3474 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3475 							bank1_offset,
3476 							&sig_byte);
3477 		if (ret_val)
3478 			return ret_val;
3479 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3480 		    E1000_ICH_NVM_SIG_VALUE) {
3481 			*bank = 1;
3482 			return E1000_SUCCESS;
3483 		}
3484 
3485 		DEBUGOUT("ERROR: No valid NVM bank present\n");
3486 		return -E1000_ERR_NVM;
3487 	}
3488 }
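
/*
 * Illustrative sketch (not driver code): the bank-signature test above
 * reduces to checking that bits 7:6 of the high byte of word 0x13 read
 * 10b.  This assumes E1000_ICH_NVM_VALID_SIG_MASK == 0xC0 and
 * E1000_ICH_NVM_SIG_VALUE == 0x80, per e1000_ich8lan.h.
 */
#if 0
static bool sig_byte_is_valid(u8 sig_byte)
{
	/* 0x80..0xBF pass (bits 7:6 == 10b); erased flash (0xFF) fails */
	return (sig_byte & 0xC0) == 0x80;
}
#endif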
3489 
3490 /**
3491  *  e1000_read_nvm_spt - NVM access for SPT
3492  *  @hw: pointer to the HW structure
3493  *  @offset: The offset (in words) of the word(s) to read.
3494  *  @words: Size of data to read in words.
3495  *  @data: pointer to the word(s) to read at offset.
3496  *
3497  *  Reads word(s) from the NVM.
3498  **/
3499 static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
3500 			      u16 *data)
3501 {
3502 	struct e1000_nvm_info *nvm = &hw->nvm;
3503 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3504 	u32 act_offset;
3505 	s32 ret_val = E1000_SUCCESS;
3506 	u32 bank = 0;
3507 	u32 dword = 0;
3508 	u16 offset_to_read;
3509 	u16 i;
3510 
3511 	DEBUGFUNC("e1000_read_nvm_spt");
3512 
3513 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3514 	    (words == 0)) {
3515 		DEBUGOUT("nvm parameter(s) out of bounds\n");
3516 		ret_val = -E1000_ERR_NVM;
3517 		goto out;
3518 	}
3519 
3520 	nvm->ops.acquire(hw);
3521 
3522 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3523 	if (ret_val != E1000_SUCCESS) {
3524 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3525 		bank = 0;
3526 	}
3527 
3528 	act_offset = (bank) ? nvm->flash_bank_size : 0;
3529 	act_offset += offset;
3530 
3531 	ret_val = E1000_SUCCESS;
3532 
3533 	for (i = 0; i < words; i += 2) {
3534 		if (words - i == 1) {
3535 			if (dev_spec->shadow_ram[offset + i].modified) {
3536 				data[i] =
3537 				    dev_spec->shadow_ram[offset + i].value;
3538 			} else {
3539 				offset_to_read = act_offset + i -
3540 						 ((act_offset + i) % 2);
3541 				ret_val =
3542 				   e1000_read_flash_dword_ich8lan(hw,
3543 								 offset_to_read,
3544 								 &dword);
3545 				if (ret_val)
3546 					break;
3547 				if ((act_offset + i) % 2 == 0)
3548 					data[i] = (u16)(dword & 0xFFFF);
3549 				else
3550 					data[i] = (u16)((dword >> 16) & 0xFFFF);
3551 			}
3552 		} else {
3553 			offset_to_read = act_offset + i;
3554 			if (!(dev_spec->shadow_ram[offset + i].modified) ||
3555 			    !(dev_spec->shadow_ram[offset + i + 1].modified)) {
3556 				ret_val =
3557 				   e1000_read_flash_dword_ich8lan(hw,
3558 								 offset_to_read,
3559 								 &dword);
3560 				if (ret_val)
3561 					break;
3562 			}
3563 			if (dev_spec->shadow_ram[offset + i].modified)
3564 				data[i] =
3565 				    dev_spec->shadow_ram[offset + i].value;
3566 			else
3567 				data[i] = (u16)(dword & 0xFFFF);
3568 			if (dev_spec->shadow_ram[offset + i + 1].modified)
3569 				data[i + 1] =
3570 				   dev_spec->shadow_ram[offset + i + 1].value;
3571 			else
3572 				data[i + 1] = (u16)(dword >> 16 & 0xFFFF);
3573 		}
3574 	}
3575 
3576 	nvm->ops.release(hw);
3577 
3578 out:
3579 	if (ret_val)
3580 		DEBUGOUT1("NVM read error: %d\n", ret_val);
3581 
3582 	return ret_val;
3583 }
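
/*
 * Illustrative sketch: how the loop above carves a 16-bit word out of
 * the 32-bit flash reads.  An even word offset maps to the low half of
 * its containing dword, an odd offset to the high half.
 */
#if 0
static u16 word_from_dword(u32 dword, u32 word_offset)
{
	/* e.g. word 0x13 (odd) is the high half of the dword at 0x12 */
	return (word_offset % 2) ? (u16)(dword >> 16) :
				   (u16)(dword & 0xFFFF);
}
#endif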
3584 
3585 /**
3586  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3587  *  @hw: pointer to the HW structure
3588  *  @offset: The offset (in words) of the word(s) to read.
3589  *  @words: Size of data to read in words
3590  *  @data: Pointer to the word(s) to read at offset.
3591  *
3592  *  Reads word(s) from the NVM using the flash access registers.
3593  **/
3594 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3595 				  u16 *data)
3596 {
3597 	struct e1000_nvm_info *nvm = &hw->nvm;
3598 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3599 	u32 act_offset;
3600 	s32 ret_val = E1000_SUCCESS;
3601 	u32 bank = 0;
3602 	u16 i, word;
3603 
3604 	DEBUGFUNC("e1000_read_nvm_ich8lan");
3605 
3606 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3607 	    (words == 0)) {
3608 		DEBUGOUT("nvm parameter(s) out of bounds\n");
3609 		ret_val = -E1000_ERR_NVM;
3610 		goto out;
3611 	}
3612 
3613 	nvm->ops.acquire(hw);
3614 
3615 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3616 	if (ret_val != E1000_SUCCESS) {
3617 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3618 		bank = 0;
3619 	}
3620 
3621 	act_offset = (bank) ? nvm->flash_bank_size : 0;
3622 	act_offset += offset;
3623 
3624 	ret_val = E1000_SUCCESS;
3625 	for (i = 0; i < words; i++) {
3626 		if (dev_spec->shadow_ram[offset + i].modified) {
3627 			data[i] = dev_spec->shadow_ram[offset + i].value;
3628 		} else {
3629 			ret_val = e1000_read_flash_word_ich8lan(hw,
3630 								act_offset + i,
3631 								&word);
3632 			if (ret_val)
3633 				break;
3634 			data[i] = word;
3635 		}
3636 	}
3637 
3638 	nvm->ops.release(hw);
3639 
3640 out:
3641 	if (ret_val)
3642 		DEBUGOUT1("NVM read error: %d\n", ret_val);
3643 
3644 	return ret_val;
3645 }
3646 
3647 /**
3648  *  e1000_flash_cycle_init_ich8lan - Initialize flash
3649  *  @hw: pointer to the HW structure
3650  *
3651  *  This function does initial flash setup so that a new read/write/erase cycle
3652  *  can be started.
3653  **/
3654 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3655 {
3656 	union ich8_hws_flash_status hsfsts;
3657 	s32 ret_val = -E1000_ERR_NVM;
3658 
3659 	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3660 
3661 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3662 
3663 	/* Check if the flash descriptor is valid */
3664 	if (!hsfsts.hsf_status.fldesvalid) {
3665 		DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
3666 		return -E1000_ERR_NVM;
3667 	}
3668 
3669 	/* Clear FCERR and DAEL in hw status by writing 1 */
3670 	hsfsts.hsf_status.flcerr = 1;
3671 	hsfsts.hsf_status.dael = 1;
3672 	if (hw->mac.type >= e1000_pch_spt)
3673 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3674 				      hsfsts.regval & 0xFFFF);
3675 	else
3676 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3677 
3678 	/* Either the hardware's SPI cycle-in-progress bit must be
3679 	 * clear before a new cycle can be started, or the FDONE bit
3680 	 * must read as 1 after a hardware reset so that it can be
3681 	 * used as an indication of whether a cycle is in progress
3682 	 * or has completed.
3683 	 */
3685 
3686 	if (!hsfsts.hsf_status.flcinprog) {
3687 		/* There is no cycle running at present,
3688 		 * so we can start a cycle.
3689 		 * Begin by setting Flash Cycle Done.
3690 		 */
3691 		hsfsts.hsf_status.flcdone = 1;
3692 		if (hw->mac.type >= e1000_pch_spt)
3693 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3694 					      hsfsts.regval & 0xFFFF);
3695 		else
3696 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3697 						hsfsts.regval);
3698 		ret_val = E1000_SUCCESS;
3699 	} else {
3700 		s32 i;
3701 
3702 		/* Otherwise poll for sometime so the current
3703 		 * cycle has a chance to end before giving up.
3704 		 */
3705 		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3706 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3707 							      ICH_FLASH_HSFSTS);
3708 			if (!hsfsts.hsf_status.flcinprog) {
3709 				ret_val = E1000_SUCCESS;
3710 				break;
3711 			}
3712 			usec_delay(1);
3713 		}
3714 		if (ret_val == E1000_SUCCESS) {
3715 			/* The previous cycle ended before the timeout;
3716 			 * now set the Flash Cycle Done.
3717 			 */
3718 			hsfsts.hsf_status.flcdone = 1;
3719 			if (hw->mac.type >= e1000_pch_spt)
3720 				E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3721 						      hsfsts.regval & 0xFFFF);
3722 			else
3723 				E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3724 							hsfsts.regval);
3725 		} else {
3726 			DEBUGOUT("Flash controller busy, cannot get access\n");
3727 		}
3728 	}
3729 
3730 	return ret_val;
3731 }
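
/*
 * Usage sketch (hypothetical, not compiled): a complete software-
 * sequenced flash access strings the cycle helpers together -- init
 * (wait for the controller to go idle), program the cycle type and
 * address, then poll for completion.
 */
#if 0
static s32 example_flash_read_cycle(struct e1000_hw *hw, u32 byte_addr)
{
	s32 ret_val = e1000_flash_cycle_init_ich8lan(hw);

	if (ret_val)
		return ret_val;
	/* ... program ICH_FLASH_HSFCTL (cycle type/size) and
	 * ICH_FLASH_FADDR (byte_addr) here ... */
	return e1000_flash_cycle_ich8lan(hw, ICH_FLASH_READ_COMMAND_TIMEOUT);
}
#endif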
3732 
3733 /**
3734  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3735  *  @hw: pointer to the HW structure
3736  *  @timeout: maximum time to wait for completion
3737  *
3738  *  This function starts a flash cycle and waits for its completion.
3739  **/
3740 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3741 {
3742 	union ich8_hws_flash_ctrl hsflctl;
3743 	union ich8_hws_flash_status hsfsts;
3744 	u32 i = 0;
3745 
3746 	DEBUGFUNC("e1000_flash_cycle_ich8lan");
3747 
3748 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3749 	if (hw->mac.type >= e1000_pch_spt)
3750 		hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3751 	else
3752 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3753 	hsflctl.hsf_ctrl.flcgo = 1;
3754 
3755 	if (hw->mac.type >= e1000_pch_spt)
3756 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3757 				      hsflctl.regval << 16);
3758 	else
3759 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3760 
3761 	/* wait till FDONE bit is set to 1 */
3762 	do {
3763 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3764 		if (hsfsts.hsf_status.flcdone)
3765 			break;
3766 		usec_delay(1);
3767 	} while (i++ < timeout);
3768 
3769 	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3770 		return E1000_SUCCESS;
3771 
3772 	return -E1000_ERR_NVM;
3773 }
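
/*
 * Sketch of the register layout assumed by the shifts above: on SPT and
 * later the flash control word shares one 32-bit register with the
 * status word (control in bits 31:16), so the driver reads it with a
 * >> 16 and writes it with a << 16 instead of using a 16-bit access.
 */
#if 0
static u32 spt_hsflctl_to_reg(u16 hsfctl)
{
	/* the status half is written as zeros; its error bits are
	 * write-1-to-clear, so zeros leave them untouched */
	return (u32)hsfctl << 16;
}
#endif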
3774 
3775 /**
3776  *  e1000_read_flash_dword_ich8lan - Read dword from flash
3777  *  @hw: pointer to the HW structure
3778  *  @offset: offset to data location
3779  *  @data: pointer to the location for storing the data
3780  *
3781  *  Reads the flash dword at offset into data.  Offset is converted
3782  *  to bytes before read.
3783  **/
3784 static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
3785 					  u32 *data)
3786 {
3787 	DEBUGFUNC("e1000_read_flash_dword_ich8lan");
3788 
3789 	if (!data)
3790 		return -E1000_ERR_NVM;
3791 
3792 	/* Must convert word offset into bytes. */
3793 	offset <<= 1;
3794 
3795 	return e1000_read_flash_data32_ich8lan(hw, offset, data);
3796 }
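
/*
 * Illustrative sketch: NVM offsets are counted in 16-bit words, while
 * the flash access registers take byte addresses, hence the offset <<= 1
 * conversions above and below.
 */
#if 0
static u32 nvm_word_to_byte_offset(u32 word_offset)
{
	return word_offset << 1;	/* e.g. word 0x13 -> byte 0x26 */
}
#endif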
3797 
3798 /**
3799  *  e1000_read_flash_word_ich8lan - Read word from flash
3800  *  @hw: pointer to the HW structure
3801  *  @offset: offset to data location
3802  *  @data: pointer to the location for storing the data
3803  *
3804  *  Reads the flash word at offset into data.  Offset is converted
3805  *  to bytes before read.
3806  **/
3807 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3808 					 u16 *data)
3809 {
3810 	DEBUGFUNC("e1000_read_flash_word_ich8lan");
3811 
3812 	if (!data)
3813 		return -E1000_ERR_NVM;
3814 
3815 	/* Must convert offset into bytes. */
3816 	offset <<= 1;
3817 
3818 	return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3819 }
3820 
3821 /**
3822  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3823  *  @hw: pointer to the HW structure
3824  *  @offset: The offset of the byte to read.
3825  *  @data: Pointer to a byte to store the value read.
3826  *
3827  *  Reads a single byte from the NVM using the flash access registers.
3828  **/
3829 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3830 					 u8 *data)
3831 {
3832 	s32 ret_val;
3833 	u16 word = 0;
3834 
3835 	/* In SPT, only 32-bit access is supported,
3836 	 * so this function should not be called.
3837 	 */
3838 	if (hw->mac.type >= e1000_pch_spt)
3839 		return -E1000_ERR_NVM;
3840 	else
3841 		ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3842 
3843 	if (ret_val)
3844 		return ret_val;
3845 
3846 	*data = (u8)word;
3847 
3848 	return E1000_SUCCESS;
3849 }
3850 
3851 /**
3852  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3853  *  @hw: pointer to the HW structure
3854  *  @offset: The offset (in bytes) of the byte or word to read.
3855  *  @size: Size of data to read, 1=byte 2=word
3856  *  @data: Pointer to the word to store the value read.
3857  *
3858  *  Reads a byte or word from the NVM using the flash access registers.
3859  **/
3860 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3861 					 u8 size, u16 *data)
3862 {
3863 	union ich8_hws_flash_status hsfsts;
3864 	union ich8_hws_flash_ctrl hsflctl;
3865 	u32 flash_linear_addr;
3866 	u32 flash_data = 0;
3867 	s32 ret_val = -E1000_ERR_NVM;
3868 	u8 count = 0;
3869 
3870 	DEBUGFUNC("e1000_read_flash_data_ich8lan");
3871 
3872 	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3873 		return -E1000_ERR_NVM;
3874 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3875 			     hw->nvm.flash_base_addr);
3876 
3877 	do {
3878 		usec_delay(1);
3879 		/* Steps */
3880 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3881 		if (ret_val != E1000_SUCCESS)
3882 			break;
3883 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3884 
3885 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3886 		hsflctl.hsf_ctrl.fldbcount = size - 1;
3887 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3888 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3889 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3890 
3891 		ret_val = e1000_flash_cycle_ich8lan(hw,
3892 						ICH_FLASH_READ_COMMAND_TIMEOUT);
3893 
3894 		/* If FCERR is set, clear it and retry the whole
3895 		 * sequence a few more times; otherwise read the data
3896 		 * from Flash Data0, least significant byte first.
3897 		 */
3899 		if (ret_val == E1000_SUCCESS) {
3900 			flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3901 			if (size == 1)
3902 				*data = (u8)(flash_data & 0x000000FF);
3903 			else if (size == 2)
3904 				*data = (u16)(flash_data & 0x0000FFFF);
3905 			break;
3906 		} else {
3907 			/* If we've gotten here, then things are probably
3908 			 * completely hosed, but if the error condition is
3909 			 * detected, it won't hurt to give it another try...
3910 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3911 			 */
3912 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3913 							      ICH_FLASH_HSFSTS);
3914 			if (hsfsts.hsf_status.flcerr) {
3915 				/* Repeat for some time before giving up. */
3916 				continue;
3917 			} else if (!hsfsts.hsf_status.flcdone) {
3918 				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3919 				break;
3920 			}
3921 		}
3922 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3923 
3924 	return ret_val;
3925 }
3926 
3927 /**
3928  *  e1000_read_flash_data32_ich8lan - Read dword from NVM
3929  *  @hw: pointer to the HW structure
3930  *  @offset: The offset (in bytes) of the dword to read.
3931  *  @data: Pointer to the dword to store the value read.
3932  *
3933  *  Reads a dword from the NVM using the flash access registers.
3934  **/
3935 static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
3936 					   u32 *data)
3937 {
3938 	union ich8_hws_flash_status hsfsts;
3939 	union ich8_hws_flash_ctrl hsflctl;
3940 	u32 flash_linear_addr;
3941 	s32 ret_val = -E1000_ERR_NVM;
3942 	u8 count = 0;
3943 
3944 	DEBUGFUNC("e1000_read_flash_data32_ich8lan");
3945 
3946 	if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
3947 	    hw->mac.type < e1000_pch_spt)
3948 		return -E1000_ERR_NVM;
3949 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3950 			     hw->nvm.flash_base_addr);
3951 
3952 	do {
3953 		usec_delay(1);
3954 		/* Steps */
3955 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3956 		if (ret_val != E1000_SUCCESS)
3957 			break;
3958 		/* In SPT, this register is in LAN memory space, not flash.
3959 		 * Therefore, only 32-bit access is supported.
3960 		 */
3961 		hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3962 
3963 		/* fldbcount is the transfer size minus one; 3 selects a dword. */
3964 		hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
3965 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3966 		/* In SPT, this register is in LAN memory space, not flash.
3967 		 * Therefore, only 32-bit access is supported.
3968 		 */
3969 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3970 				      (u32)hsflctl.regval << 16);
3971 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3972 
3973 		ret_val = e1000_flash_cycle_ich8lan(hw,
3974 						ICH_FLASH_READ_COMMAND_TIMEOUT);
3975 
3976 		/* If FCERR is set, clear it and retry the whole
3977 		 * sequence a few more times; otherwise read the data
3978 		 * from Flash Data0, least significant byte first.
3979 		 */
3981 		if (ret_val == E1000_SUCCESS) {
3982 			*data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3983 			break;
3984 		} else {
3985 			/* If we've gotten here, then things are probably
3986 			 * completely hosed, but if the error condition is
3987 			 * detected, it won't hurt to give it another try...
3988 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3989 			 */
3990 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3991 							      ICH_FLASH_HSFSTS);
3992 			if (hsfsts.hsf_status.flcerr) {
3993 				/* Repeat for some time before giving up. */
3994 				continue;
3995 			} else if (!hsfsts.hsf_status.flcdone) {
3996 				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3997 				break;
3998 			}
3999 		}
4000 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4001 
4002 	return ret_val;
4003 }
4004 
4005 /**
4006  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
4007  *  @hw: pointer to the HW structure
4008  *  @offset: The offset (in words) of the word(s) to write.
4009  *  @words: Size of data to write in words
4010  *  @data: Pointer to the word(s) to write at offset.
4011  *
4012  *  Writes the word(s) to the shadow RAM, to be committed to the NVM on the
 *  next checksum update.
4013  **/
4014 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
4015 				   u16 *data)
4016 {
4017 	struct e1000_nvm_info *nvm = &hw->nvm;
4018 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4019 	u16 i;
4020 
4021 	DEBUGFUNC("e1000_write_nvm_ich8lan");
4022 
4023 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
4024 	    (words == 0)) {
4025 		DEBUGOUT("nvm parameter(s) out of bounds\n");
4026 		return -E1000_ERR_NVM;
4027 	}
4028 
4029 	nvm->ops.acquire(hw);
4030 
4031 	for (i = 0; i < words; i++) {
4032 		dev_spec->shadow_ram[offset + i].modified = TRUE;
4033 		dev_spec->shadow_ram[offset + i].value = data[i];
4034 	}
4035 
4036 	nvm->ops.release(hw);
4037 
4038 	return E1000_SUCCESS;
4039 }
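
/*
 * Usage sketch (hypothetical caller): the write above only stages data
 * in the shadow RAM, so a caller must follow it with a checksum update
 * to push the change into the flash.
 */
#if 0
static s32 example_nvm_write_commit(struct e1000_hw *hw)
{
	u16 val = 0x1234;	/* hypothetical word value and offset */
	s32 ret_val;

	ret_val = hw->nvm.ops.write(hw, 0x20, 1, &val);
	if (ret_val)
		return ret_val;
	/* commits the shadow RAM (and checksum) to the flash */
	return hw->nvm.ops.update(hw);
}
#endif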
4040 
4041 /**
4042  *  e1000_update_nvm_checksum_spt - Update the checksum for NVM
4043  *  @hw: pointer to the HW structure
4044  *
4045  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
4046  *  which writes the checksum to the shadow ram.  The changes in the shadow
4047  *  ram are then committed to the EEPROM by processing each bank at a time
4048  *  checking for the modified bit and writing only the pending changes.
4049  *  After a successful commit, the shadow ram is cleared and is ready for
4050  *  future writes.
4051  **/
4052 static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
4053 {
4054 	struct e1000_nvm_info *nvm = &hw->nvm;
4055 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4056 	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4057 	s32 ret_val;
4058 	u32 dword = 0;
4059 
4060 	DEBUGFUNC("e1000_update_nvm_checksum_spt");
4061 
4062 	ret_val = e1000_update_nvm_checksum_generic(hw);
4063 	if (ret_val)
4064 		goto out;
4065 
4066 	if (nvm->type != e1000_nvm_flash_sw)
4067 		goto out;
4068 
4069 	nvm->ops.acquire(hw);
4070 
4071 	/* We're writing to the opposite bank so if we're on bank 1,
4072 	 * write to bank 0 etc.  We also need to erase the segment that
4073 	 * is going to be written
4074 	 */
4075 	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4076 	if (ret_val != E1000_SUCCESS) {
4077 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4078 		bank = 0;
4079 	}
4080 
4081 	if (bank == 0) {
4082 		new_bank_offset = nvm->flash_bank_size;
4083 		old_bank_offset = 0;
4084 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4085 		if (ret_val)
4086 			goto release;
4087 	} else {
4088 		old_bank_offset = nvm->flash_bank_size;
4089 		new_bank_offset = 0;
4090 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4091 		if (ret_val)
4092 			goto release;
4093 	}
4094 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i += 2) {
4095 		/* Determine whether to write the value stored
4096 		 * in the other NVM bank or a modified value stored
4097 		 * in the shadow RAM
4098 		 */
4099 		ret_val = e1000_read_flash_dword_ich8lan(hw,
4100 							 i + old_bank_offset,
4101 							 &dword);
4102 
4103 		if (dev_spec->shadow_ram[i].modified) {
4104 			dword &= 0xffff0000;
4105 			dword |= (dev_spec->shadow_ram[i].value & 0xffff);
4106 		}
4107 		if (dev_spec->shadow_ram[i + 1].modified) {
4108 			dword &= 0x0000ffff;
4109 			dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
4110 				  << 16);
4111 		}
4112 		if (ret_val)
4113 			break;
4114 
4115 		/* If the word is 0x13, then make sure the signature bits
4116 		 * (15:14) are 11b until the commit has completed.
4117 		 * This will allow us to write 10b which indicates the
4118 		 * signature is valid.  We want to do this after the write
4119 		 * has completed so that we don't mark the segment valid
4120 		 * while the write is still in progress
4121 		 */
4122 		if (i == E1000_ICH_NVM_SIG_WORD - 1)
4123 			dword |= E1000_ICH_NVM_SIG_MASK << 16;
4124 
4128 		usec_delay(100);
4129 
4130 		/* Write the data to the new bank (offset in words). */
4131 		act_offset = i + new_bank_offset;
4132 		ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
4133 								dword);
4134 		if (ret_val)
4135 			break;
4136 	}
4137 
4138 	/* Don't bother writing the segment valid bits if sector
4139 	 * programming failed.
4140 	 */
4141 	if (ret_val) {
4142 		DEBUGOUT("Flash commit failed.\n");
4143 		goto release;
4144 	}
4145 
4146 	/* Finally, validate the new segment by setting bits 15:14
4147 	 * of word 0x13 to 10b.  This can be done without an erase
4148 	 * because these bits start out as 11b and we only need to
4149 	 * clear bit 14.
4150 	 */
4151 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4152 
4153 	/* offset is in words, but we read a dword */
4154 	--act_offset;
4155 	ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4156 
4157 	if (ret_val)
4158 		goto release;
4159 
4160 	dword &= 0xBFFFFFFF;
4161 	ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4162 
4163 	if (ret_val)
4164 		goto release;
4165 
4166 	/* offset is in words, but we read a dword */
4167 	act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
4168 	ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4169 
4170 	if (ret_val)
4171 		goto release;
4172 
4173 	dword &= 0x00FFFFFF;
4174 	ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4175 
4176 	if (ret_val)
4177 		goto release;
4178 
4179 	/* Great!  Everything worked, we can now clear the cached entries. */
4180 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4181 		dev_spec->shadow_ram[i].modified = FALSE;
4182 		dev_spec->shadow_ram[i].value = 0xFFFF;
4183 	}
4184 
4185 release:
4186 	nvm->ops.release(hw);
4187 
4188 	/* Reload the EEPROM, or else modifications will not appear
4189 	 * until after the next adapter reset.
4190 	 */
4191 	if (!ret_val) {
4192 		nvm->ops.reload(hw);
4193 		msec_delay(10);
4194 	}
4195 
4196 out:
4197 	if (ret_val)
4198 		DEBUGOUT1("NVM update error: %d\n", ret_val);
4199 
4200 	return ret_val;
4201 }
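
/*
 * Sketch of the signature transition performed above: after an erase,
 * bits 15:14 of word 0x13 read 11b.  Word 0x13 is the high word of the
 * dword that starts at word 0x12, so clearing dword bit 30 turns the
 * signature into the valid 10b value -- no erase needed, since flash
 * writes can only clear bits.
 */
#if 0
static u32 mark_bank_valid(u32 sig_dword)
{
	return sig_dword & 0xBFFFFFFF;	/* bits 31:30: 11b -> 10b */
}
#endif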
4202 
4203 /**
4204  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
4205  *  @hw: pointer to the HW structure
4206  *
4207  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
4208  *  which writes the checksum to the shadow ram.  The changes in the shadow
4209  *  ram are then committed to the EEPROM by processing each bank at a time
4210  *  checking for the modified bit and writing only the pending changes.
4211  *  After a successful commit, the shadow ram is cleared and is ready for
4212  *  future writes.
4213  **/
4214 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
4215 {
4216 	struct e1000_nvm_info *nvm = &hw->nvm;
4217 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4218 	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4219 	s32 ret_val;
4220 	u16 data = 0;
4221 
4222 	DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
4223 
4224 	ret_val = e1000_update_nvm_checksum_generic(hw);
4225 	if (ret_val)
4226 		goto out;
4227 
4228 	if (nvm->type != e1000_nvm_flash_sw)
4229 		goto out;
4230 
4231 	nvm->ops.acquire(hw);
4232 
4233 	/* We're writing to the opposite bank so if we're on bank 1,
4234 	 * write to bank 0 etc.  We also need to erase the segment that
4235 	 * is going to be written
4236 	 */
4237 	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4238 	if (ret_val != E1000_SUCCESS) {
4239 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4240 		bank = 0;
4241 	}
4242 
4243 	if (bank == 0) {
4244 		new_bank_offset = nvm->flash_bank_size;
4245 		old_bank_offset = 0;
4246 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4247 		if (ret_val)
4248 			goto release;
4249 	} else {
4250 		old_bank_offset = nvm->flash_bank_size;
4251 		new_bank_offset = 0;
4252 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4253 		if (ret_val)
4254 			goto release;
4255 	}
4256 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4257 		if (dev_spec->shadow_ram[i].modified) {
4258 			data = dev_spec->shadow_ram[i].value;
4259 		} else {
4260 			ret_val = e1000_read_flash_word_ich8lan(hw, i +
4261 								old_bank_offset,
4262 								&data);
4263 			if (ret_val)
4264 				break;
4265 		}
4266 		/* If the word is 0x13, then make sure the signature bits
4267 		 * (15:14) are 11b until the commit has completed.
4268 		 * This will allow us to write 10b which indicates the
4269 		 * signature is valid.  We want to do this after the write
4270 		 * has completed so that we don't mark the segment valid
4271 		 * while the write is still in progress
4272 		 */
4273 		if (i == E1000_ICH_NVM_SIG_WORD)
4274 			data |= E1000_ICH_NVM_SIG_MASK;
4275 
4276 		/* Convert offset to bytes. */
4277 		act_offset = (i + new_bank_offset) << 1;
4278 
4279 		usec_delay(100);
4280 
4281 		/* Write the bytes to the new bank. */
4282 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4283 							       act_offset,
4284 							       (u8)data);
4285 		if (ret_val)
4286 			break;
4287 
4288 		usec_delay(100);
4289 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4290 							  act_offset + 1,
4291 							  (u8)(data >> 8));
4292 		if (ret_val)
4293 			break;
4294 	}
4295 
4296 	/* Don't bother writing the segment valid bits if sector
4297 	 * programming failed.
4298 	 */
4299 	if (ret_val) {
4300 		DEBUGOUT("Flash commit failed.\n");
4301 		goto release;
4302 	}
4303 
4304 	/* Finally, validate the new segment by setting bits 15:14
4305 	 * of word 0x13 to 10b.  This can be done without an erase
4306 	 * because these bits start out as 11b and we only need to
4307 	 * clear bit 14.
4308 	 */
4309 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4310 	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
4311 	if (ret_val)
4312 		goto release;
4313 
4314 	data &= 0xBFFF;
4315 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1,
4316 						       (u8)(data >> 8));
4317 	if (ret_val)
4318 		goto release;
4319 
4320 	/* And invalidate the previously valid segment by setting the
4321 	 * high byte of its signature word (0x13) to 0.  This can be
4322 	 * done without an erase because a flash erase sets all bits
4323 	 * to 1's, and 1's can always be written to 0's.
4324 	 */
4325 	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4326 
4327 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
4328 
4329 	if (ret_val)
4330 		goto release;
4331 
4332 	/* Great!  Everything worked, we can now clear the cached entries. */
4333 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4334 		dev_spec->shadow_ram[i].modified = FALSE;
4335 		dev_spec->shadow_ram[i].value = 0xFFFF;
4336 	}
4337 
4338 release:
4339 	nvm->ops.release(hw);
4340 
4341 	/* Reload the EEPROM, or else modifications will not appear
4342 	 * until after the next adapter reset.
4343 	 */
4344 	if (!ret_val) {
4345 		nvm->ops.reload(hw);
4346 		msec_delay(10);
4347 	}
4348 
4349 out:
4350 	if (ret_val)
4351 		DEBUGOUT1("NVM update error: %d\n", ret_val);
4352 
4353 	return ret_val;
4354 }
4355 
4356 /**
4357  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
4358  *  @hw: pointer to the HW structure
4359  *
4360  *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
4361  *  If the bit is 0, the EEPROM was modified but the checksum was not
4362  *  calculated; in that case we calculate the checksum and set bit 6.
4363  **/
4364 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4365 {
4366 	s32 ret_val;
4367 	u16 data;
4368 	u16 word;
4369 	u16 valid_csum_mask;
4370 
4371 	DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
4372 
4373 	/* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
4374 	 * the checksum needs to be fixed.  This bit is an indication that
4375 	 * the NVM was prepared by OEM software and did not calculate
4376 	 * the checksum...a likely scenario.
4377 	 */
4378 	switch (hw->mac.type) {
4379 	case e1000_pch_lpt:
4380 	case e1000_pch_spt:
4381 	case e1000_pch_cnp:
4382 		word = NVM_COMPAT;
4383 		valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4384 		break;
4385 	default:
4386 		word = NVM_FUTURE_INIT_WORD1;
4387 		valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4388 		break;
4389 	}
4390 
4391 	ret_val = hw->nvm.ops.read(hw, word, 1, &data);
4392 	if (ret_val)
4393 		return ret_val;
4394 
4395 	if (!(data & valid_csum_mask)) {
4396 		data |= valid_csum_mask;
4397 		ret_val = hw->nvm.ops.write(hw, word, 1, &data);
4398 		if (ret_val)
4399 			return ret_val;
4400 		ret_val = hw->nvm.ops.update(hw);
4401 		if (ret_val)
4402 			return ret_val;
4403 	}
4404 
4405 	return e1000_validate_nvm_checksum_generic(hw);
4406 }
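
/*
 * Sketch of the checksum rule the generic validate routine enforces,
 * assuming the usual e1000 convention that words 0x00-0x3F sum to
 * NVM_SUM (0xBABA), with the checksum itself stored in word 0x3F:
 */
#if 0
static u16 example_nvm_checksum(const u16 *words)	/* words[0..0x3E] */
{
	u16 sum = 0;
	int i;

	for (i = 0; i < 0x3F; i++)
		sum += words[i];
	return (u16)(0xBABA - sum);	/* value to store in word 0x3F */
}
#endif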
4407 
4408 /**
4409  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4410  *  @hw: pointer to the HW structure
4411  *  @offset: The offset (in bytes) of the byte/word to write.
4412  *  @size: Size of data to write, 1=byte 2=word
4413  *  @data: The byte(s) to write to the NVM.
4414  *
4415  *  Writes one/two bytes to the NVM using the flash access registers.
4416  **/
4417 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4418 					  u8 size, u16 data)
4419 {
4420 	union ich8_hws_flash_status hsfsts;
4421 	union ich8_hws_flash_ctrl hsflctl;
4422 	u32 flash_linear_addr;
4423 	u32 flash_data = 0;
4424 	s32 ret_val;
4425 	u8 count = 0;
4426 
4427 	DEBUGFUNC("e1000_write_flash_data_ich8lan");
4428 
4429 	if (hw->mac.type >= e1000_pch_spt) {
4430 		if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4431 			return -E1000_ERR_NVM;
4432 	} else {
4433 		if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4434 			return -E1000_ERR_NVM;
4435 	}
4436 
4437 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4438 			     hw->nvm.flash_base_addr);
4439 
4440 	do {
4441 		usec_delay(1);
4442 		/* Steps */
4443 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
4444 		if (ret_val != E1000_SUCCESS)
4445 			break;
4446 		/* In SPT, this register is in LAN memory space, not
4447 		 * flash.  Therefore, only 32-bit access is supported.
4448 		 */
4449 		if (hw->mac.type >= e1000_pch_spt)
4450 			hsflctl.regval =
4451 			    E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
4452 		else
4453 			hsflctl.regval =
4454 			    E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4455 
4456 		/* fldbcount is the transfer size minus one: 0=1 byte, 1=2 bytes, 3=dword. */
4457 		hsflctl.hsf_ctrl.fldbcount = size - 1;
4458 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4459 		/* In SPT, this register is in LAN memory space,
4460 		 * not flash.  Therefore, only 32-bit access is
4461 		 * supported.
4462 		 */
4463 		if (hw->mac.type >= e1000_pch_spt)
4464 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4465 					      hsflctl.regval << 16);
4466 		else
4467 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4468 						hsflctl.regval);
4469 
4470 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4471 
4472 		if (size == 1)
4473 			flash_data = (u32)data & 0x00FF;
4474 		else
4475 			flash_data = (u32)data;
4476 
4477 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
4478 
4479 		/* If FCERR is set, clear it and retry the whole
4480 		 * sequence a few more times; otherwise we are done.
4481 		 */
4482 		ret_val =
4483 		    e1000_flash_cycle_ich8lan(hw,
4484 					      ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4485 		if (ret_val == E1000_SUCCESS)
4486 			break;
4487 
4488 		/* If we're here, then things are most likely
4489 		 * completely hosed, but if the error condition
4490 		 * is detected, it won't hurt to give it another
4491 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4492 		 */
4493 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4494 		if (hsfsts.hsf_status.flcerr)
4495 			/* Repeat for some time before giving up. */
4496 			continue;
4497 		if (!hsfsts.hsf_status.flcdone) {
4498 			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4499 			break;
4500 		}
4501 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4502 
4503 	return ret_val;
4504 }
4505 
4506 /**
4507 *  e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
4508 *  @hw: pointer to the HW structure
4509 *  @offset: The offset (in bytes) of the dword to write.
4510 *  @data: The 4 bytes to write to the NVM.
4511 *
4512 *  Writes a dword to the NVM using the flash access registers.
4513 **/
4514 static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4515 					    u32 data)
4516 {
4517 	union ich8_hws_flash_status hsfsts;
4518 	union ich8_hws_flash_ctrl hsflctl;
4519 	u32 flash_linear_addr;
4520 	s32 ret_val;
4521 	u8 count = 0;
4522 
4523 	DEBUGFUNC("e1000_write_flash_data32_ich8lan");
4524 
4525 	if (hw->mac.type >= e1000_pch_spt) {
4526 		if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
4527 			return -E1000_ERR_NVM;
4528 	}
4529 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4530 			     hw->nvm.flash_base_addr);
4531 	do {
4532 		usec_delay(1);
4533 		/* Steps */
4534 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
4535 		if (ret_val != E1000_SUCCESS)
4536 			break;
4537 
4538 		/* In SPT, this register is in LAN memory space, not
4539 		 * flash.  Therefore, only 32-bit access is supported.
4540 		 */
4541 		if (hw->mac.type >= e1000_pch_spt)
4542 			hsflctl.regval = E1000_READ_FLASH_REG(hw,
4543 							      ICH_FLASH_HSFSTS)
4544 					 >> 16;
4545 		else
4546 			hsflctl.regval = E1000_READ_FLASH_REG16(hw,
4547 							      ICH_FLASH_HSFCTL);
4548 
4549 		hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
4550 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4551 
4552 		/* In SPT, this register is in LAN memory space,
4553 		 * not flash.  Therefore, only 32-bit access is
4554 		 * supported.
4555 		 */
4556 		if (hw->mac.type >= e1000_pch_spt)
4557 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4558 					      hsflctl.regval << 16);
4559 		else
4560 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4561 						hsflctl.regval);
4562 
4563 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4564 
4565 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);
4566 
4567 		/* If FCERR is set, clear it and retry the whole
4568 		 * sequence a few more times; otherwise we are done.
4569 		 */
4570 		ret_val = e1000_flash_cycle_ich8lan(hw,
4571 					       ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4572 
4573 		if (ret_val == E1000_SUCCESS)
4574 			break;
4575 
4576 		/* If we're here, then things are most likely
4577 		 * completely hosed, but if the error condition
4578 		 * is detected, it won't hurt to give it another
4579 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4580 		 */
4581 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4582 
4583 		if (hsfsts.hsf_status.flcerr)
4584 			/* Repeat for some time before giving up. */
4585 			continue;
4586 		if (!hsfsts.hsf_status.flcdone) {
4587 			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4588 			break;
4589 		}
4590 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4591 
4592 	return ret_val;
4593 }
4594 
4595 /**
4596  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
4597  *  @hw: pointer to the HW structure
4598  *  @offset: The offset of the byte to write.
4599  *  @data: The byte to write to the NVM.
4600  *
4601  *  Writes a single byte to the NVM using the flash access registers.
4602  **/
4603 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
4604 					  u8 data)
4605 {
4606 	u16 word = (u16)data;
4607 
4608 	DEBUGFUNC("e1000_write_flash_byte_ich8lan");
4609 
4610 	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4611 }
4612 
4613 /**
4614 *  e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
4615 *  @hw: pointer to the HW structure
4616 *  @offset: The offset of the dword to write.
4617 *  @dword: The dword to write to the NVM.
4618 *
4619 *  Writes a single dword to the NVM using the flash access registers.
4620 *  Goes through a retry algorithm before giving up.
4621 **/
4622 static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
4623 						 u32 offset, u32 dword)
4624 {
4625 	s32 ret_val;
4626 	u16 program_retries;
4627 
4628 	DEBUGFUNC("e1000_retry_write_flash_dword_ich8lan");
4629 
4630 	/* Must convert word offset into bytes. */
4631 	offset <<= 1;
4632 
4633 	ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4634 
4635 	if (!ret_val)
4636 		return ret_val;
4637 	for (program_retries = 0; program_retries < 100; program_retries++) {
4638 		DEBUGOUT2("Retrying Dword %8.8X at offset %u\n", dword, offset);
4639 		usec_delay(100);
4640 		ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4641 		if (ret_val == E1000_SUCCESS)
4642 			break;
4643 	}
4644 	if (program_retries == 100)
4645 		return -E1000_ERR_NVM;
4646 
4647 	return E1000_SUCCESS;
4648 }
4649 
4650 /**
4651  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
4652  *  @hw: pointer to the HW structure
4653  *  @offset: The offset of the byte to write.
4654  *  @byte: The byte to write to the NVM.
4655  *
4656  *  Writes a single byte to the NVM using the flash access registers.
4657  *  Goes through a retry algorithm before giving up.
4658  **/
4659 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
4660 						u32 offset, u8 byte)
4661 {
4662 	s32 ret_val;
4663 	u16 program_retries;
4664 
4665 	DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
4666 
4667 	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4668 	if (!ret_val)
4669 		return ret_val;
4670 
4671 	for (program_retries = 0; program_retries < 100; program_retries++) {
4672 		DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
4673 		usec_delay(100);
4674 		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4675 		if (ret_val == E1000_SUCCESS)
4676 			break;
4677 	}
4678 	if (program_retries == 100)
4679 		return -E1000_ERR_NVM;
4680 
4681 	return E1000_SUCCESS;
4682 }
4683 
4684 /**
4685  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
4686  *  @hw: pointer to the HW structure
4687  *  @bank: 0 for first bank, 1 for second bank, etc.
4688  *
4689  *  Erases the specified bank.  Each bank is a 4k block; banks are 0 based.
4690  *  Bank N starts at 4096 * N + flash_reg_addr.
4691  **/
4692 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
4693 {
4694 	struct e1000_nvm_info *nvm = &hw->nvm;
4695 	union ich8_hws_flash_status hsfsts;
4696 	union ich8_hws_flash_ctrl hsflctl;
4697 	u32 flash_linear_addr;
4698 	/* bank size is in 16bit words - adjust to bytes */
4699 	u32 flash_bank_size = nvm->flash_bank_size * 2;
4700 	s32 ret_val;
4701 	s32 count = 0;
4702 	s32 j, iteration, sector_size;
4703 
4704 	DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
4705 
4706 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4707 
4708 	/* Determine HW Sector size: Read BERASE bits of hw flash status
4709 	 * register
4710 	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
4711 	 *     consecutive sectors.  The start index for the nth Hw sector
4712 	 *     can be calculated as = bank * 4096 + n * 256
4713 	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
4714 	 *     The start index for the nth Hw sector can be calculated
4715 	 *     as = bank * 4096
4716 	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
4717 	 *     (ich9 only, otherwise error condition)
4718 	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
4719 	 */
4720 	switch (hsfsts.hsf_status.berasesz) {
4721 	case 0:
4722 		/* Hw sector size 256 */
4723 		sector_size = ICH_FLASH_SEG_SIZE_256;
4724 		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4725 		break;
4726 	case 1:
4727 		sector_size = ICH_FLASH_SEG_SIZE_4K;
4728 		iteration = 1;
4729 		break;
4730 	case 2:
4731 		sector_size = ICH_FLASH_SEG_SIZE_8K;
4732 		iteration = 1;
4733 		break;
4734 	case 3:
4735 		sector_size = ICH_FLASH_SEG_SIZE_64K;
4736 		iteration = 1;
4737 		break;
4738 	default:
4739 		return -E1000_ERR_NVM;
4740 	}
4741 
4742 	/* Start with the base address, then add the sector offset. */
4743 	flash_linear_addr = hw->nvm.flash_base_addr;
4744 	flash_linear_addr += (bank) ? flash_bank_size : 0;
4745 
4746 	for (j = 0; j < iteration; j++) {
4747 		do {
4748 			u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4749 
4750 			/* Steps */
4751 			ret_val = e1000_flash_cycle_init_ich8lan(hw);
4752 			if (ret_val)
4753 				return ret_val;
4754 
4755 			/* Write a value 11 (block Erase) in Flash
4756 			 * Cycle field in hw flash control
4757 			 */
4758 			if (hw->mac.type >= e1000_pch_spt)
4759 				hsflctl.regval =
4760 				    E1000_READ_FLASH_REG(hw,
4761 							 ICH_FLASH_HSFSTS)>>16;
4762 			else
4763 				hsflctl.regval =
4764 				    E1000_READ_FLASH_REG16(hw,
4765 							   ICH_FLASH_HSFCTL);
4766 
4767 			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4768 			if (hw->mac.type >= e1000_pch_spt)
4769 				E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4770 						      hsflctl.regval << 16);
4771 			else
4772 				E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4773 							hsflctl.regval);
4774 
4775 			/* Write the last 24 bits of an index within the
4776 			 * block into Flash Linear address field in Flash
4777 			 * Address.
4778 			 */
4779 			flash_linear_addr += (j * sector_size);
4780 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
4781 					      flash_linear_addr);
4782 
4783 			ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4784 			if (ret_val == E1000_SUCCESS)
4785 				break;
4786 
4787 			/* If FCERR is set, clear it and retry
4788 			 * the whole sequence a few more times;
4789 			 * otherwise we are done.
4790 			 */
4791 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4792 						      ICH_FLASH_HSFSTS);
4793 			if (hsfsts.hsf_status.flcerr)
4794 				/* repeat for some time before giving up */
4795 				continue;
4796 			else if (!hsfsts.hsf_status.flcdone)
4797 				return ret_val;
4798 		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4799 	}
4800 
4801 	return E1000_SUCCESS;
4802 }
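
/*
 * Sketch of the erase geometry above: a bank is flash_bank_size words
 * (flash_bank_size * 2 bytes), and the number of erase cycles is the
 * bank size in bytes divided by the hardware sector size reported in
 * BERASE (16 cycles for 256-byte sectors, 1 otherwise).
 */
#if 0
static s32 erase_iterations(u32 bank_bytes, u32 sector_bytes)
{
	/* e.g. a 4 KB bank of 256-byte sectors needs 16 erase cycles */
	return bank_bytes / sector_bytes;
}
#endif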
4803 
4804 /**
4805  *  e1000_valid_led_default_ich8lan - Set the default LED settings
4806  *  @hw: pointer to the HW structure
4807  *  @data: Pointer to the LED settings
4808  *
4809  *  Reads the LED default settings from the NVM to data.  If the NVM LED
4810  *  setting is all 0's or F's, set the LED default to a valid LED default
4811  *  setting.
4812  **/
4813 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4814 {
4815 	s32 ret_val;
4816 
4817 	DEBUGFUNC("e1000_valid_led_default_ich8lan");
4818 
4819 	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4820 	if (ret_val) {
4821 		DEBUGOUT("NVM Read Error\n");
4822 		return ret_val;
4823 	}
4824 
4825 	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4826 		*data = ID_LED_DEFAULT_ICH8LAN;
4827 
4828 	return E1000_SUCCESS;
4829 }
4830 
4831 /**
4832  *  e1000_id_led_init_pchlan - store LED configurations
4833  *  @hw: pointer to the HW structure
4834  *
4835  *  PCH does not control LEDs via the LEDCTL register, rather it uses
4836  *  the PHY LED configuration register.
4837  *
4838  *  PCH also does not have an "always on" or "always off" mode which
4839  *  complicates the ID feature.  Instead of using the "on" mode to indicate
4840  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4841  *  use "link_up" mode.  The LEDs will still ID on request if there is no
4842  *  link based on logic in e1000_led_[on|off]_pchlan().
4843  **/
4844 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4845 {
4846 	struct e1000_mac_info *mac = &hw->mac;
4847 	s32 ret_val;
4848 	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4849 	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4850 	u16 data, i, temp, shift;
4851 
4852 	DEBUGFUNC("e1000_id_led_init_pchlan");
4853 
4854 	/* Get default ID LED modes */
4855 	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4856 	if (ret_val)
4857 		return ret_val;
4858 
4859 	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4860 	mac->ledctl_mode1 = mac->ledctl_default;
4861 	mac->ledctl_mode2 = mac->ledctl_default;
4862 
4863 	for (i = 0; i < 4; i++) {
4864 		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4865 		shift = (i * 5);
4866 		switch (temp) {
4867 		case ID_LED_ON1_DEF2:
4868 		case ID_LED_ON1_ON2:
4869 		case ID_LED_ON1_OFF2:
4870 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4871 			mac->ledctl_mode1 |= (ledctl_on << shift);
4872 			break;
4873 		case ID_LED_OFF1_DEF2:
4874 		case ID_LED_OFF1_ON2:
4875 		case ID_LED_OFF1_OFF2:
4876 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4877 			mac->ledctl_mode1 |= (ledctl_off << shift);
4878 			break;
4879 		default:
4880 			/* Do nothing */
4881 			break;
4882 		}
4883 		switch (temp) {
4884 		case ID_LED_DEF1_ON2:
4885 		case ID_LED_ON1_ON2:
4886 		case ID_LED_OFF1_ON2:
4887 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4888 			mac->ledctl_mode2 |= (ledctl_on << shift);
4889 			break;
4890 		case ID_LED_DEF1_OFF2:
4891 		case ID_LED_ON1_OFF2:
4892 		case ID_LED_OFF1_OFF2:
4893 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4894 			mac->ledctl_mode2 |= (ledctl_off << shift);
4895 			break;
4896 		default:
4897 			/* Do nothing */
4898 			break;
4899 		}
4900 	}
4901 
4902 	return E1000_SUCCESS;
4903 }
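
/*
 * Sketch of the bit packing decoded above: the NVM ID-LED word carries
 * one 4-bit mode nibble per LED (hence the i << 2 extract shift), while
 * the PHY LED control register packs 5 bits per LED (hence the i * 5
 * insert shift).  Assumes E1000_LEDCTL_LED0_MODE_MASK == 0xF.
 */
#if 0
static u16 id_led_nibble(u16 nvm_word, int led)	/* led = 0..3 */
{
	return (nvm_word >> (led * 4)) & 0xF;
}
#endif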
4904 
4905 /**
4906  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4907  *  @hw: pointer to the HW structure
4908  *
4909  *  ICH8 uses the PCI Express bus, but does not contain a PCI Express
4910  *  Capability register, so the bus width is hardcoded.
4911  **/
4912 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4913 {
4914 	struct e1000_bus_info *bus = &hw->bus;
4915 	s32 ret_val;
4916 
4917 	DEBUGFUNC("e1000_get_bus_info_ich8lan");
4918 
4919 	ret_val = e1000_get_bus_info_pcie_generic(hw);
4920 
4921 	/* ICH devices are "PCI Express"-ish.  They have
4922 	 * a configuration space, but do not contain
4923 	 * PCI Express Capability registers, so bus width
4924 	 * must be hardcoded.
4925 	 */
4926 	if (bus->width == e1000_bus_width_unknown)
4927 		bus->width = e1000_bus_width_pcie_x1;
4928 
4929 	return ret_val;
4930 }
4931 
4932 /**
4933  *  e1000_reset_hw_ich8lan - Reset the hardware
4934  *  @hw: pointer to the HW structure
4935  *
4936  *  Does a full reset of the hardware which includes a reset of the PHY and
4937  *  MAC.
4938  **/
4939 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4940 {
4941 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4942 	u16 kum_cfg;
4943 	u32 ctrl, reg;
4944 	s32 ret_val;
4945 	u16 pci_cfg;
4946 
4947 	DEBUGFUNC("e1000_reset_hw_ich8lan");
4948 
4949 	/* Prevent the PCI-E bus from sticking if there is no TLP connection
4950 	 * on the last TLP read/write transaction when the MAC is reset.
4951 	 */
4952 	ret_val = e1000_disable_pcie_master_generic(hw);
4953 	if (ret_val)
4954 		DEBUGOUT("PCI-E Master disable polling has failed.\n");
4955 
4956 	DEBUGOUT("Masking off all interrupts\n");
4957 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4958 
4959 	/* Disable the Transmit and Receive units.  Then delay to allow
4960 	 * any pending transactions to complete before we hit the MAC
4961 	 * with the global reset.
4962 	 */
4963 	E1000_WRITE_REG(hw, E1000_RCTL, 0);
4964 	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
4965 	E1000_WRITE_FLUSH(hw);
4966 
4967 	msec_delay(10);
4968 
4969 	/* Workaround for ICH8 bit corruption issue in FIFO memory */
4970 	if (hw->mac.type == e1000_ich8lan) {
4971 		/* Set Tx and Rx buffer allocation to 8k apiece. */
4972 		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
4973 		/* Set Packet Buffer Size to 16k. */
4974 		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
4975 	}
4976 
4977 	if (hw->mac.type == e1000_pchlan) {
4978 		/* Save the NVM K1 bit setting*/
4979 		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4980 		if (ret_val)
4981 			return ret_val;
4982 
4983 		if (kum_cfg & E1000_NVM_K1_ENABLE)
4984 			dev_spec->nvm_k1_enabled = TRUE;
4985 		else
4986 			dev_spec->nvm_k1_enabled = FALSE;
4987 	}
4988 
4989 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
4990 
4991 	if (!hw->phy.ops.check_reset_block(hw)) {
4992 		/* Full-chip reset requires MAC and PHY reset at the same
4993 		 * time to make sure the interface between MAC and the
4994 		 * external PHY is reset.
4995 		 */
4996 		ctrl |= E1000_CTRL_PHY_RST;
4997 
4998 		/* Gate automatic PHY configuration by hardware on
4999 		 * non-managed 82579
5000 		 */
5001 		if ((hw->mac.type == e1000_pch2lan) &&
5002 		    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
5003 			e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
5004 	}
5005 	ret_val = e1000_acquire_swflag_ich8lan(hw);
5006 
5007 	/* A read from EXTCNF_CTRL in e1000_acquire_swflag_ich8lan may
5008 	 * occur during the global reset and hang the system; a config
5009 	 * space access creates the needed delay.  Writing the
5010 	 * E1000_PCI_VENDOR_ID_REGISTER value to the read-only E1000_STRAP
5011 	 * register ensures the config space read is done before the reset.
5012 	 */
5013 	e1000_read_pci_cfg(hw, E1000_PCI_VENDOR_ID_REGISTER, &pci_cfg);
5014 	E1000_WRITE_REG(hw, E1000_STRAP, pci_cfg);
5015 	DEBUGOUT("Issuing a global reset to ich8lan\n");
5016 	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
5017 	/* cannot issue a flush here because it hangs the hardware */
5018 	msec_delay(20);
5019 
5020 	/* A configuration space access improves the HW-level time sync
5021 	 * mechanism.  Write the E1000_PCI_VENDOR_ID_REGISTER value to
5022 	 * the read-only E1000_STRAP register to ensure the config space
5023 	 * read is done before any MAC register access.
5024 	 */
5025 	e1000_read_pci_cfg(hw, E1000_PCI_VENDOR_ID_REGISTER, &pci_cfg);
5026 	E1000_WRITE_REG(hw, E1000_STRAP, pci_cfg);
5027 
5028 	/* Set Phy Config Counter to 50msec */
5029 	if (hw->mac.type == e1000_pch2lan) {
5030 		reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
5031 		reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
5032 		reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
5033 		E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
5034 	}
5035 
5036 	if (ctrl & E1000_CTRL_PHY_RST) {
5037 		ret_val = hw->phy.ops.get_cfg_done(hw);
5038 		if (ret_val)
5039 			return ret_val;
5040 
5041 		ret_val = e1000_post_phy_reset_ich8lan(hw);
5042 		if (ret_val)
5043 			return ret_val;
5044 	}
5045 
5046 	/* For PCH, this write will make sure that any noise
5047 	 * will be detected as a CRC error and be dropped rather than show up
5048 	 * as a bad packet to the DMA engine.
5049 	 */
5050 	if (hw->mac.type == e1000_pchlan)
5051 		E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
5052 
5053 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
5054 	E1000_READ_REG(hw, E1000_ICR);
5055 
5056 	reg = E1000_READ_REG(hw, E1000_KABGTXD);
5057 	reg |= E1000_KABGTXD_BGSQLBIAS;
5058 	E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
5059 
5060 	return E1000_SUCCESS;
5061 }
5062 
5063 /**
5064  *  e1000_init_hw_ich8lan - Initialize the hardware
5065  *  @hw: pointer to the HW structure
5066  *
5067  *  Prepares the hardware for transmit and receive by doing the following:
5068  *   - initialize hardware bits
5069  *   - initialize LED identification
5070  *   - setup receive address registers
5071  *   - setup flow control
5072  *   - setup transmit descriptors
5073  *   - clear statistics
5074  **/
5075 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
5076 {
5077 	struct e1000_mac_info *mac = &hw->mac;
5078 	u32 ctrl_ext, txdctl, snoop;
5079 	s32 ret_val;
5080 	u16 i;
5081 
5082 	DEBUGFUNC("e1000_init_hw_ich8lan");
5083 
5084 	e1000_initialize_hw_bits_ich8lan(hw);
5085 
5086 	/* Initialize identification LED */
5087 	ret_val = mac->ops.id_led_init(hw);
5088 	/* An error is not fatal and we should not stop init due to this */
5089 	if (ret_val)
5090 		DEBUGOUT("Error initializing identification LED\n");
5091 
5092 	/* Setup the receive address. */
5093 	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
5094 
5095 	/* Zero out the Multicast HASH table */
5096 	DEBUGOUT("Zeroing the MTA\n");
5097 	for (i = 0; i < mac->mta_reg_count; i++)
5098 		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
5099 
5100 	/* The 82578 Rx buffer will stall if wakeup is enabled in host and
5101 	 * the ME.  Disable wakeup by clearing the host wakeup bit.
5102 	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
5103 	 */
5104 	if (hw->phy.type == e1000_phy_82578) {
5105 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
5106 		i &= ~BM_WUC_HOST_WU_BIT;
5107 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
5108 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
5109 		if (ret_val)
5110 			return ret_val;
5111 	}
5112 
5113 	/* Setup link and flow control */
5114 	ret_val = mac->ops.setup_link(hw);
5115 
5116 	/* Set the transmit descriptor write-back policy for both queues */
5117 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
5118 	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5119 		  E1000_TXDCTL_FULL_TX_DESC_WB);
5120 	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5121 		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5122 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
5123 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
5124 	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5125 		  E1000_TXDCTL_FULL_TX_DESC_WB);
5126 	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5127 		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5128 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
5129 
5130 	/* ICH8 has opposite polarity of no_snoop bits.
5131 	 * By default, we should use snoop behavior.
5132 	 */
5133 	if (mac->type == e1000_ich8lan)
5134 		snoop = PCIE_ICH8_SNOOP_ALL;
5135 	else
5136 		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
5137 	e1000_set_pcie_no_snoop_generic(hw, snoop);
5138 
5139 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5140 	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
5141 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5142 
5143 	/* Clear all of the statistics registers (clear on read).  It is
5144 	 * important that we do this after we have tried to establish link
5145 	 * because the symbol error count will increment wildly if there
5146 	 * is no link.
5147 	 */
5148 	e1000_clear_hw_cntrs_ich8lan(hw);
5149 
5150 	return ret_val;
5151 }
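
/* Illustrative bring-up sketch (not part of the shared code): OS-specific
 * drivers reach this routine through the mac.ops function table, typically
 * right after a full reset, e.g.:
 *
 *	hw->mac.ops.reset_hw(hw);
 *	ret_val = hw->mac.ops.init_hw(hw);
 *	if (ret_val)
 *		... handle failed initialization ...
 */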
5152 
5153 /**
5154  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
5155  *  @hw: pointer to the HW structure
5156  *
5157  *  Sets/Clears required hardware bits necessary for correctly setting up the
5158  *  hardware for transmit and receive.
5159  **/
5160 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
5161 {
5162 	u32 reg;
5163 
5164 	DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
5165 
5166 	/* Extended Device Control */
5167 	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
5168 	reg |= (1 << 22);
5169 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
5170 	if (hw->mac.type >= e1000_pchlan)
5171 		reg |= E1000_CTRL_EXT_PHYPDEN;
5172 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
5173 
5174 	/* Transmit Descriptor Control 0 */
5175 	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
5176 	reg |= (1 << 22);
5177 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
5178 
5179 	/* Transmit Descriptor Control 1 */
5180 	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
5181 	reg |= (1 << 22);
5182 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
5183 
5184 	/* Transmit Arbitration Control 0 */
5185 	reg = E1000_READ_REG(hw, E1000_TARC(0));
5186 	if (hw->mac.type == e1000_ich8lan)
5187 		reg |= (1 << 28) | (1 << 29);
5188 	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
5189 	E1000_WRITE_REG(hw, E1000_TARC(0), reg);
5190 
5191 	/* Transmit Arbitration Control 1 */
5192 	reg = E1000_READ_REG(hw, E1000_TARC(1));
5193 	if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
5194 		reg &= ~(1 << 28);
5195 	else
5196 		reg |= (1 << 28);
5197 	reg |= (1 << 24) | (1 << 26) | (1 << 30);
5198 	E1000_WRITE_REG(hw, E1000_TARC(1), reg);
5199 
5200 	/* Device Status */
5201 	if (hw->mac.type == e1000_ich8lan) {
5202 		reg = E1000_READ_REG(hw, E1000_STATUS);
5203 		reg &= ~(1 << 31);
5204 		E1000_WRITE_REG(hw, E1000_STATUS, reg);
5205 	}
5206 
5207 	/* Work around a descriptor data corruption issue seen with NFS v2
5208 	 * UDP traffic: just disable the NFS filtering capability.
5209 	 */
5210 	reg = E1000_READ_REG(hw, E1000_RFCTL);
5211 	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
5212 
5213 	/* Disable IPv6 extension header parsing because some malformed
5214 	 * IPv6 headers can hang the Rx.
5215 	 */
5216 	if (hw->mac.type == e1000_ich8lan)
5217 		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
5218 	E1000_WRITE_REG(hw, E1000_RFCTL, reg);
5219 
5220 	/* Enable ECC on Lynxpoint */
5221 	if (hw->mac.type >= e1000_pch_lpt) {
5222 		reg = E1000_READ_REG(hw, E1000_PBECCSTS);
5223 		reg |= E1000_PBECCSTS_ECC_ENABLE;
5224 		E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
5225 
5226 		reg = E1000_READ_REG(hw, E1000_CTRL);
5227 		reg |= E1000_CTRL_MEHE;
5228 		E1000_WRITE_REG(hw, E1000_CTRL, reg);
5229 	}
5230 
5231 	return;
5232 }
5233 
5234 /**
5235  *  e1000_setup_link_ich8lan - Setup flow control and link settings
5236  *  @hw: pointer to the HW structure
5237  *
5238  *  Determines which flow control settings to use, then configures flow
5239  *  control.  Calls the appropriate media-specific link configuration
5240  *  function.  Assuming the adapter has a valid link partner, a valid link
5241  *  should be established.  Assumes the hardware has previously been reset
5242  *  and the transmitter and receiver are not enabled.
5243  **/
5244 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
5245 {
5246 	s32 ret_val;
5247 
5248 	DEBUGFUNC("e1000_setup_link_ich8lan");
5249 
5250 	/* ICH parts do not have a word in the NVM to determine
5251 	 * the default flow control setting, so we explicitly
5252 	 * set it to full.
5253 	 */
5254 	if (hw->fc.requested_mode == e1000_fc_default)
5255 		hw->fc.requested_mode = e1000_fc_full;
5256 
5257 	/* Save off the requested flow control mode for use later.  Depending
5258 	 * on the link partner's capabilities, we may or may not use this mode.
5259 	 */
5260 	hw->fc.current_mode = hw->fc.requested_mode;
5261 
5262 	DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
5263 		hw->fc.current_mode);
5264 
5265 	if (!hw->phy.ops.check_reset_block(hw)) {
5266 		/* Continue to configure the copper link. */
5267 		ret_val = hw->mac.ops.setup_physical_interface(hw);
5268 		if (ret_val)
5269 			return ret_val;
5270 	}
5271 
5272 	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
5273 	if ((hw->phy.type == e1000_phy_82578) ||
5274 	    (hw->phy.type == e1000_phy_82579) ||
5275 	    (hw->phy.type == e1000_phy_i217) ||
5276 	    (hw->phy.type == e1000_phy_82577)) {
5277 		E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
5278 
5279 		ret_val = hw->phy.ops.write_reg(hw,
5280 					     PHY_REG(BM_PORT_CTRL_PAGE, 27),
5281 					     hw->fc.pause_time);
5282 		if (ret_val)
5283 			return ret_val;
5284 	}
5285 
5286 	return e1000_set_fc_watermarks_generic(hw);
5287 }
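
/* Usage sketch (illustrative only): a caller that wants a specific flow
 * control mode sets hw->fc.requested_mode before invoking this routine
 * through the ops table; e1000_fc_default is promoted to e1000_fc_full
 * above when nothing was requested.
 *
 *	hw->fc.requested_mode = e1000_fc_rx_pause;
 *	ret_val = hw->mac.ops.setup_link(hw);
 */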
5288 
5289 /**
5290  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
5291  *  @hw: pointer to the HW structure
5292  *
5293  *  Configures the kumeran interface to the PHY to wait the appropriate time
5294  *  when polling the PHY, then call the generic setup_copper_link to finish
5295  *  when polling the PHY, then calls the generic setup_copper_link to finish
5296  **/
5297 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
5298 {
5299 	u32 ctrl;
5300 	s32 ret_val;
5301 	u16 reg_data;
5302 
5303 	DEBUGFUNC("e1000_setup_copper_link_ich8lan");
5304 
5305 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5306 	ctrl |= E1000_CTRL_SLU;
5307 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5308 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5309 
5310 	/* Set the mac to wait the maximum time between each iteration
5311 	 * and increase the max iterations when polling the phy;
5312 	 * this fixes erroneous timeouts at 10Mbps.
5313 	 */
5314 	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
5315 					       0xFFFF);
5316 	if (ret_val)
5317 		return ret_val;
5318 	ret_val = e1000_read_kmrn_reg_generic(hw,
5319 					      E1000_KMRNCTRLSTA_INBAND_PARAM,
5320 					      &reg_data);
5321 	if (ret_val)
5322 		return ret_val;
5323 	reg_data |= 0x3F;
5324 	ret_val = e1000_write_kmrn_reg_generic(hw,
5325 					       E1000_KMRNCTRLSTA_INBAND_PARAM,
5326 					       reg_data);
5327 	if (ret_val)
5328 		return ret_val;
5329 
5330 	switch (hw->phy.type) {
5331 	case e1000_phy_igp_3:
5332 		ret_val = e1000_copper_link_setup_igp(hw);
5333 		if (ret_val)
5334 			return ret_val;
5335 		break;
5336 	case e1000_phy_bm:
5337 	case e1000_phy_82578:
5338 		ret_val = e1000_copper_link_setup_m88(hw);
5339 		if (ret_val)
5340 			return ret_val;
5341 		break;
5342 	case e1000_phy_82577:
5343 	case e1000_phy_82579:
5344 		ret_val = e1000_copper_link_setup_82577(hw);
5345 		if (ret_val)
5346 			return ret_val;
5347 		break;
5348 	case e1000_phy_ife:
5349 		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
5350 					       &reg_data);
5351 		if (ret_val)
5352 			return ret_val;
5353 
5354 		reg_data &= ~IFE_PMC_AUTO_MDIX;
5355 
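		/* hw->phy.mdix selects the crossover mode below:
		 * 1 = force MDI, 2 = force MDI-X, 0 (or other) = auto.
		 */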
5356 		switch (hw->phy.mdix) {
5357 		case 1:
5358 			reg_data &= ~IFE_PMC_FORCE_MDIX;
5359 			break;
5360 		case 2:
5361 			reg_data |= IFE_PMC_FORCE_MDIX;
5362 			break;
5363 		case 0:
5364 		default:
5365 			reg_data |= IFE_PMC_AUTO_MDIX;
5366 			break;
5367 		}
5368 		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
5369 						reg_data);
5370 		if (ret_val)
5371 			return ret_val;
5372 		break;
5373 	default:
5374 		break;
5375 	}
5376 
5377 	return e1000_setup_copper_link_generic(hw);
5378 }
5379 
5380 /**
5381  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
5382  *  @hw: pointer to the HW structure
5383  *
5384  *  Calls the PHY specific link setup function and then calls the
5385  *  generic setup_copper_link to finish configuring the link for
5386  *  Lynxpoint PCH devices.
5387  **/
5388 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
5389 {
5390 	u32 ctrl;
5391 	s32 ret_val;
5392 
5393 	DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
5394 
5395 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5396 	ctrl |= E1000_CTRL_SLU;
5397 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5398 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5399 
5400 	ret_val = e1000_copper_link_setup_82577(hw);
5401 	if (ret_val)
5402 		return ret_val;
5403 
5404 	return e1000_setup_copper_link_generic(hw);
5405 }
5406 
5407 /**
5408  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
5409  *  @hw: pointer to the HW structure
5410  *  @speed: pointer to store current link speed
5411  *  @duplex: pointer to store the current link duplex
5412  *
5413  *  Calls the generic get_speed_and_duplex to retrieve the current link
5414  *  information and then calls the Kumeran lock loss workaround for links at
5415  *  gigabit speeds.
5416  **/
5417 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
5418 					  u16 *duplex)
5419 {
5420 	s32 ret_val;
5421 
5422 	DEBUGFUNC("e1000_get_link_up_info_ich8lan");
5423 
5424 	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
5425 	if (ret_val)
5426 		return ret_val;
5427 
5428 	if ((hw->mac.type == e1000_ich8lan) &&
5429 	    (hw->phy.type == e1000_phy_igp_3) &&
5430 	    (*speed == SPEED_1000)) {
5431 		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
5432 	}
5433 
5434 	return ret_val;
5435 }
5436 
5437 /**
5438  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
5439  *  @hw: pointer to the HW structure
5440  *
5441  *  Work-around for 82566 Kumeran PCS lock loss:
5442  *  On link status change (e.g. PCI reset, speed change), when link is
5443  *  up and speed is gigabit:
5444  *    0) if workaround is optionally disabled do nothing
5445  *    1) wait 1ms for Kumeran link to come up
5446  *    2) check Kumeran Diagnostic register PCS lock loss bit
5447  *    3) if not set the link is locked (all is good), otherwise...
5448  *    4) reset the PHY
5449  *    5) repeat up to 10 times
5450  *  Note: this is only called for IGP3 copper when speed is 1gb.
5451  *  Note: this is only called for IGP3 copper when speed is 1 Gb/s.
5452 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
5453 {
5454 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5455 	u32 phy_ctrl;
5456 	s32 ret_val;
5457 	u16 i, data;
5458 	bool link;
5459 
5460 	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
5461 
5462 	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
5463 		return E1000_SUCCESS;
5464 
5465 	/* Make sure link is up before proceeding; if not, just return.
5466 	 * Attempting this while the link is negotiating fouls up link
5467 	 * stability.
5468 	 */
5469 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
5470 	if (!link)
5471 		return E1000_SUCCESS;
5472 
5473 	for (i = 0; i < 10; i++) {
5474 		/* read once to clear */
5475 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5476 		if (ret_val)
5477 			return ret_val;
5478 		/* and again to get new status */
5479 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5480 		if (ret_val)
5481 			return ret_val;
5482 
5483 		/* check for PCS lock */
5484 		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
5485 			return E1000_SUCCESS;
5486 
5487 		/* Issue PHY reset */
5488 		hw->phy.ops.reset(hw);
5489 		msec_delay_irq(5);
5490 	}
5491 	/* Disable GigE link negotiation */
5492 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5493 	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
5494 		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5495 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5496 
5497 	/* Call gig speed drop workaround on Gig disable before accessing
5498 	 * any PHY registers
5499 	 */
5500 	e1000_gig_downshift_workaround_ich8lan(hw);
5501 
5502 	/* unable to acquire PCS lock */
5503 	return -E1000_ERR_PHY;
5504 }
5505 
5506 /**
5507  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
5508  *  @hw: pointer to the HW structure
5509  *  @state: boolean value used to set the current Kumeran workaround state
5510  *
5511  *  If ICH8, set the current Kumeran workaround state (enabled = TRUE,
5512  *  disabled = FALSE).
5513  **/
5514 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
5515 						 bool state)
5516 {
5517 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5518 
5519 	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
5520 
5521 	if (hw->mac.type != e1000_ich8lan) {
5522 		DEBUGOUT("Workaround applies to ICH8 only.\n");
5523 		return;
5524 	}
5525 
5526 	dev_spec->kmrn_lock_loss_workaround_enabled = state;
5527 
5528 	return;
5529 }
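
/* Illustrative call (sketch): enabling the workaround for an ICH8 part
 * before gigabit link is established, e.g. from OS-specific init code:
 *
 *	e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
 */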
5530 
5531 /**
5532  *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
5533  *  @hw: pointer to the HW structure
5534  *
5535  *  Workaround for 82566 power-down on D3 entry:
5536  *    1) disable gigabit link
5537  *    2) write VR power-down enable
5538  *    3) read it back
5539  *  Continue if successful, else issue LCD reset and repeat
5540  **/
5541 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
5542 {
5543 	u32 reg;
5544 	u16 data;
5545 	u8  retry = 0;
5546 
5547 	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
5548 
5549 	if (hw->phy.type != e1000_phy_igp_3)
5550 		return;
5551 
5552 	/* Try the workaround twice (if needed) */
5553 	do {
5554 		/* Disable link */
5555 		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
5556 		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
5557 			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5558 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
5559 
5560 		/* Call gig speed drop workaround on Gig disable before
5561 		 * accessing any PHY registers
5562 		 */
5563 		if (hw->mac.type == e1000_ich8lan)
5564 			e1000_gig_downshift_workaround_ich8lan(hw);
5565 
5566 		/* Write VR power-down enable */
5567 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5568 		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5569 		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
5570 				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);
5571 
5572 		/* Read it back and test */
5573 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5574 		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5575 		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
5576 			break;
5577 
5578 		/* Issue PHY reset and repeat at most one more time */
5579 		reg = E1000_READ_REG(hw, E1000_CTRL);
5580 		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
5581 		retry++;
5582 	} while (retry);
5583 }
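
/* Illustrative call (sketch): OS power management code runs this on D3
 * entry; the routine itself bails out unless an IGP3 PHY is present:
 *
 *	e1000_igp3_phy_powerdown_workaround_ich8lan(hw);
 */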
5584 
5585 /**
5586  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
5587  *  @hw: pointer to the HW structure
5588  *
5589  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
5590  *  LPLU, Gig disable, MDIC PHY reset):
5591  *    1) Set Kumeran Near-end loopback
5592  *    2) Clear Kumeran Near-end loopback
5593  *  Should only be called for ICH8[m] devices with any 1G Phy.
5594  **/
5595 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
5596 {
5597 	s32 ret_val;
5598 	u16 reg_data;
5599 
5600 	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
5601 
5602 	if ((hw->mac.type != e1000_ich8lan) ||
5603 	    (hw->phy.type == e1000_phy_ife))
5604 		return;
5605 
5606 	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5607 					      &reg_data);
5608 	if (ret_val)
5609 		return;
5610 	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
5611 	ret_val = e1000_write_kmrn_reg_generic(hw,
5612 					       E1000_KMRNCTRLSTA_DIAG_OFFSET,
5613 					       reg_data);
5614 	if (ret_val)
5615 		return;
5616 	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
5617 	e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5618 				     reg_data);
5619 }
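
/* Within this file the Kumeran lock loss workaround and
 * e1000_suspend_workarounds_ich8lan() invoke this on ICH8 parts when
 * dropping from 1 Gb/s, per the conditions listed above.
 */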
5620 
5621 /**
5622  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
5623  *  @hw: pointer to the HW structure
5624  *
5625  *  During S0 to Sx transition, it is possible the link remains at gig
5626  *  instead of negotiating to a lower speed.  Before going to Sx, set
5627  *  'Gig Disable' to force link speed negotiation to a lower speed based on
5628  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
5629  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
5630  *  needs to be written.
5631  *  Parts that support (and are linked to a partner that supports) EEE in
5632  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
5633  *  than 10Mbps w/o EEE.
5634  **/
5635 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
5636 {
5637 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5638 	u32 phy_ctrl;
5639 	s32 ret_val;
5640 
5641 	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
5642 
5643 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5644 	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
5645 
5646 	if (hw->phy.type == e1000_phy_i217) {
5647 		u16 phy_reg, device_id = hw->device_id;
5648 
5649 		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
5650 		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
5651 		    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
5652 		    (device_id == E1000_DEV_ID_PCH_I218_V3) ||
5653 		    (hw->mac.type >= e1000_pch_spt)) {
5654 			u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
5655 
5656 			E1000_WRITE_REG(hw, E1000_FEXTNVM6,
5657 					fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
5658 		}
5659 
5660 		ret_val = hw->phy.ops.acquire(hw);
5661 		if (ret_val)
5662 			goto out;
5663 
5664 		if (!dev_spec->eee_disable) {
5665 			u16 eee_advert;
5666 
5667 			ret_val =
5668 			    e1000_read_emi_reg_locked(hw,
5669 						      I217_EEE_ADVERTISEMENT,
5670 						      &eee_advert);
5671 			if (ret_val)
5672 				goto release;
5673 
5674 			/* Disable LPLU if both link partners support 100BaseT
5675 			 * EEE and 100Full is advertised on both ends of the
5676 			 * link, and enable Auto Enable LPI since there will
5677 			 * be no driver to enable LPI while in Sx.
5678 			 */
5679 			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
5680 			    (dev_spec->eee_lp_ability &
5681 			     I82579_EEE_100_SUPPORTED) &&
5682 			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
5683 				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
5684 					      E1000_PHY_CTRL_NOND0A_LPLU);
5685 
5686 				/* Set Auto Enable LPI after link up */
5687 				hw->phy.ops.read_reg_locked(hw,
5688 							    I217_LPI_GPIO_CTRL,
5689 							    &phy_reg);
5690 				phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5691 				hw->phy.ops.write_reg_locked(hw,
5692 							     I217_LPI_GPIO_CTRL,
5693 							     phy_reg);
5694 			}
5695 		}
5696 
5697 		/* For i217 Intel Rapid Start Technology support,
5698 		 * when the system is going into Sx and no manageability engine
5699 		 * is present, the driver must configure proxy to reset only on
5700 		 * power good.  LPI (Low Power Idle) state must also reset only
5701 		 * on power good, as well as the MTA (Multicast table array).
5702 		 * The SMBus release must also be disabled on LCD reset.
5703 		 */
5704 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
5705 		      E1000_ICH_FWSM_FW_VALID)) {
5706 			/* Enable proxy to reset only on power good. */
5707 			hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
5708 						    &phy_reg);
5709 			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
5710 			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
5711 						     phy_reg);
5712 
5713 			/* Set the LPI (EEE) enable bit to reset only on
5714 			 * power good.
5715 			 */
5716 			hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
5717 			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
5718 			hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
5719 
5720 			/* Disable the SMB release on LCD reset. */
5721 			hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
5722 			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
5723 			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5724 		}
5725 
5726 		/* Enable MTA to reset for Intel Rapid Start Technology
5727 		 * Support
5728 		 */
5729 		hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
5730 		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
5731 		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5732 
5733 release:
5734 		hw->phy.ops.release(hw);
5735 	}
5736 out:
5737 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5738 
5739 	if (hw->mac.type == e1000_ich8lan)
5740 		e1000_gig_downshift_workaround_ich8lan(hw);
5741 
5742 	if (hw->mac.type >= e1000_pchlan) {
5743 		e1000_oem_bits_config_ich8lan(hw, FALSE);
5744 
5745 		/* Reset PHY to activate OEM bits on 82577/8 */
5746 		if (hw->mac.type == e1000_pchlan)
5747 			e1000_phy_hw_reset_generic(hw);
5748 
5749 		ret_val = hw->phy.ops.acquire(hw);
5750 		if (ret_val)
5751 			return;
5752 		e1000_write_smbus_addr(hw);
5753 		hw->phy.ops.release(hw);
5754 	}
5755 
5756 	return;
5757 }
5758 
5759 /**
5760  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
5761  *  @hw: pointer to the HW structure
5762  *
5763  *  During Sx to S0 transitions on non-managed devices or managed devices
5764  *  on which PHY resets are not blocked, if the PHY registers cannot be
5765  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
5766  *  the PHY.
5767  *  On i217, setup Intel Rapid Start Technology.
5768  **/
5769 u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
5770 {
5771 	s32 ret_val;
5772 
5773 	DEBUGFUNC("e1000_resume_workarounds_pchlan");
5774 	if (hw->mac.type < e1000_pch2lan)
5775 		return E1000_SUCCESS;
5776 
5777 	ret_val = e1000_init_phy_workarounds_pchlan(hw);
5778 	if (ret_val) {
5779 		DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
5780 		return ret_val;
5781 	}
5782 
5783 	/* For i217 Intel Rapid Start Technology support: when the system
5784 	 * is transitioning from Sx and no manageability engine is present,
5785 	 * configure SMBus to restore on reset, disable proxy, and enable
5786 	 * the reset on MTA (Multicast table array).
5787 	 */
5788 	if (hw->phy.type == e1000_phy_i217) {
5789 		u16 phy_reg;
5790 
5791 		ret_val = hw->phy.ops.acquire(hw);
5792 		if (ret_val) {
5793 			DEBUGOUT("Failed to setup iRST\n");
5794 			return ret_val;
5795 		}
5796 
5797 		/* Clear Auto Enable LPI after link up */
5798 		hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
5799 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5800 		hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
5801 
5802 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
5803 		    E1000_ICH_FWSM_FW_VALID)) {
5804 			/* Restore clear on SMB if no manageability engine
5805 			 * is present
5806 			 */
5807 			ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
5808 							      &phy_reg);
5809 			if (ret_val)
5810 				goto release;
5811 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5812 			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5813 
5814 			/* Disable Proxy */
5815 			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
5816 		}
5817 		/* Enable reset on MTA */
5818 		ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
5819 						      &phy_reg);
5820 		if (ret_val)
5821 			goto release;
5822 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5823 		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5824 release:
5825 		if (ret_val)
5826 			DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
5827 		hw->phy.ops.release(hw);
5828 		return ret_val;
5829 	}
5830 	return E1000_SUCCESS;
5831 }
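
/* Illustrative suspend/resume pairing (sketch; the OS-specific driver is
 * the actual caller):
 *
 *	e1000_suspend_workarounds_ich8lan(hw);		-- before S0 -> Sx
 *	...
 *	e1000_resume_workarounds_pchlan(hw);		-- after Sx -> S0
 */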
5832 
5833 /**
5834  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5835  *  @hw: pointer to the HW structure
5836  *
5837  *  Return the LED back to the default configuration.
5838  **/
5839 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5840 {
5841 	DEBUGFUNC("e1000_cleanup_led_ich8lan");
5842 
5843 	if (hw->phy.type == e1000_phy_ife)
5844 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5845 					     0);
5846 
5847 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5848 	return E1000_SUCCESS;
5849 }
5850 
5851 /**
5852  *  e1000_led_on_ich8lan - Turn LEDs on
5853  *  @hw: pointer to the HW structure
5854  *
5855  *  Turn on the LEDs.
5856  **/
5857 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5858 {
5859 	DEBUGFUNC("e1000_led_on_ich8lan");
5860 
5861 	if (hw->phy.type == e1000_phy_ife)
5862 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5863 				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5864 
5865 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5866 	return E1000_SUCCESS;
5867 }
5868 
5869 /**
5870  *  e1000_led_off_ich8lan - Turn LEDs off
5871  *  @hw: pointer to the HW structure
5872  *
5873  *  Turn off the LEDs.
5874  **/
5875 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5876 {
5877 	DEBUGFUNC("e1000_led_off_ich8lan");
5878 
5879 	if (hw->phy.type == e1000_phy_ife)
5880 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5881 			       (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5882 
5883 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5884 	return E1000_SUCCESS;
5885 }
5886 
5887 /**
5888  *  e1000_setup_led_pchlan - Configures SW controllable LED
5889  *  @hw: pointer to the HW structure
5890  *
5891  *  This prepares the SW controllable LED for use.
5892  **/
5893 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5894 {
5895 	DEBUGFUNC("e1000_setup_led_pchlan");
5896 
5897 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5898 				     (u16)hw->mac.ledctl_mode1);
5899 }
5900 
5901 /**
5902  *  e1000_cleanup_led_pchlan - Restore the default LED operation
5903  *  @hw: pointer to the HW structure
5904  *
5905  *  Return the LED back to the default configuration.
5906  **/
5907 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5908 {
5909 	DEBUGFUNC("e1000_cleanup_led_pchlan");
5910 
5911 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5912 				     (u16)hw->mac.ledctl_default);
5913 }
5914 
5915 /**
5916  *  e1000_led_on_pchlan - Turn LEDs on
5917  *  @hw: pointer to the HW structure
5918  *
5919  *  Turn on the LEDs.
5920  **/
5921 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5922 {
5923 	u16 data = (u16)hw->mac.ledctl_mode2;
5924 	u32 i, led;
5925 
5926 	DEBUGFUNC("e1000_led_on_pchlan");
5927 
5928 	/* If no link, then turn LED on by setting the invert bit
5929 	 * for each LED whose mode is "link_up" in ledctl_mode2.
5930 	 */
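	/* HV_LED_CONFIG packs the three LED controls at 5-bit strides;
	 * E1000_PHY_LED0_MASK/_MODE_MASK/_IVRT describe one such field.
	 */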
5931 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5932 		for (i = 0; i < 3; i++) {
5933 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5934 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
5935 			    E1000_LEDCTL_MODE_LINK_UP)
5936 				continue;
5937 			if (led & E1000_PHY_LED0_IVRT)
5938 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5939 			else
5940 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
5941 		}
5942 	}
5943 
5944 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5945 }
5946 
5947 /**
5948  *  e1000_led_off_pchlan - Turn LEDs off
5949  *  @hw: pointer to the HW structure
5950  *
5951  *  Turn off the LEDs.
5952  **/
5953 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5954 {
5955 	u16 data = (u16)hw->mac.ledctl_mode1;
5956 	u32 i, led;
5957 
5958 	DEBUGFUNC("e1000_led_off_pchlan");
5959 
5960 	/* If no link, then turn LED off by clearing the invert bit
5961 	 * for each LED whose mode is "link_up" in ledctl_mode1.
5962 	 */
5963 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5964 		for (i = 0; i < 3; i++) {
5965 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5966 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
5967 			    E1000_LEDCTL_MODE_LINK_UP)
5968 				continue;
5969 			if (led & E1000_PHY_LED0_IVRT)
5970 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5971 			else
5972 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
5973 		}
5974 	}
5975 
5976 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5977 }
5978 
5979 /**
5980  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5981  *  @hw: pointer to the HW structure
5982  *
5983  *  Read appropriate register for the config done bit for completion status
5984  *  and configure the PHY through s/w for EEPROM-less parts.
5985  *
5986  *  NOTE: some EEPROM-less silicon will fail trying to read the
5987  *  config done bit, so only an error is logged and execution continues.
5988  *  If we were to return with an error, EEPROM-less silicon could not
5989  *  be reset or change link.
5990  **/
5991 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5992 {
5993 	s32 ret_val = E1000_SUCCESS;
5994 	u32 bank = 0;
5995 	u32 status;
5996 
5997 	DEBUGFUNC("e1000_get_cfg_done_ich8lan");
5998 
5999 	e1000_get_cfg_done_generic(hw);
6000 
6001 	/* Wait for indication from h/w that it has completed basic config */
6002 	if (hw->mac.type >= e1000_ich10lan) {
6003 		e1000_lan_init_done_ich8lan(hw);
6004 	} else {
6005 		ret_val = e1000_get_auto_rd_done_generic(hw);
6006 		if (ret_val) {
6007 			/* When auto config read does not complete, do not
6008 			 * return with an error.  This can happen when there is
6009 			 * no EEPROM; returning an error would prevent link.
6010 			 */
6011 			DEBUGOUT("Auto Read Done did not complete\n");
6012 			ret_val = E1000_SUCCESS;
6013 		}
6014 	}
6015 
6016 	/* Clear PHY Reset Asserted bit */
6017 	status = E1000_READ_REG(hw, E1000_STATUS);
6018 	if (status & E1000_STATUS_PHYRA)
6019 		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
6020 	else
6021 		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
6022 
6023 	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
6024 	if (hw->mac.type <= e1000_ich9lan) {
6025 		if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
6026 		    (hw->phy.type == e1000_phy_igp_3)) {
6027 			e1000_phy_init_script_igp3(hw);
6028 		}
6029 	} else {
6030 		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
6031 			/* Maybe we should do a basic PHY config */
6032 			DEBUGOUT("EEPROM not present\n");
6033 			ret_val = -E1000_ERR_CONFIG;
6034 		}
6035 	}
6036 
6037 	return ret_val;
6038 }
6039 
6040 /**
6041  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
6042  * @hw: pointer to the HW structure
6043  *
6044  * When powering down the PHY to save power, turning off link during a
6045  * driver unload, or when wake on LAN is not enabled, remove the link.
6046  **/
6047 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
6048 {
6049 	/* If the management interface is not enabled, then power down */
6050 	if (!(hw->mac.ops.check_mng_mode(hw) ||
6051 	      hw->phy.ops.check_reset_block(hw)))
6052 		e1000_power_down_phy_copper(hw);
6053 
6054 	return;
6055 }
6056 
6057 /**
6058  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
6059  *  @hw: pointer to the HW structure
6060  *
6061  *  Clears hardware counters specific to the silicon family and calls
6062  *  clear_hw_cntrs_generic to clear all general purpose counters.
6063  **/
6064 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
6065 {
6066 	u16 phy_data;
6067 	s32 ret_val;
6068 
6069 	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
6070 
6071 	e1000_clear_hw_cntrs_base_generic(hw);
6072 
6073 	E1000_READ_REG(hw, E1000_ALGNERRC);
6074 	E1000_READ_REG(hw, E1000_RXERRC);
6075 	E1000_READ_REG(hw, E1000_TNCRS);
6076 	E1000_READ_REG(hw, E1000_CEXTERR);
6077 	E1000_READ_REG(hw, E1000_TSCTC);
6078 	E1000_READ_REG(hw, E1000_TSCTFC);
6079 
6080 	E1000_READ_REG(hw, E1000_MGTPRC);
6081 	E1000_READ_REG(hw, E1000_MGTPDC);
6082 	E1000_READ_REG(hw, E1000_MGTPTC);
6083 
6084 	E1000_READ_REG(hw, E1000_IAC);
6085 	E1000_READ_REG(hw, E1000_ICRXOC);
6086 
6087 	/* Clear PHY statistics registers */
6088 	if ((hw->phy.type == e1000_phy_82578) ||
6089 	    (hw->phy.type == e1000_phy_82579) ||
6090 	    (hw->phy.type == e1000_phy_i217) ||
6091 	    (hw->phy.type == e1000_phy_82577)) {
6092 		ret_val = hw->phy.ops.acquire(hw);
6093 		if (ret_val)
6094 			return;
6095 		ret_val = hw->phy.ops.set_page(hw,
6096 					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
6097 		if (ret_val)
6098 			goto release;
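		/* The reads below discard phy_data; these PHY statistics
		 * registers are clear-on-read.
		 */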
6099 		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
6100 		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
6101 		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
6102 		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
6103 		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
6104 		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
6105 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
6106 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
6107 		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
6108 		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
6109 		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
6110 		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
6111 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
6112 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
6113 release:
6114 		hw->phy.ops.release(hw);
6115 	}
6116 }
6117 
6118