/******************************************************************************

  Copyright (c) 2001-2014, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD:$*/

/* 82562G 10/100 Network Connection
 * 82562G-2 10/100 Network Connection
 * 82562GT 10/100 Network Connection
 * 82562GT-2 10/100 Network Connection
 * 82562V 10/100 Network Connection
 * 82562V-2 10/100 Network Connection
 * 82566DC-2 Gigabit Network Connection
 * 82566DC Gigabit Network Connection
 * 82566DM-2 Gigabit Network Connection
 * 82566DM Gigabit Network Connection
 * 82566MC Gigabit Network Connection
 * 82566MM Gigabit Network Connection
 * 82567LM Gigabit Network Connection
 * 82567LF Gigabit Network Connection
 * 82567V Gigabit Network Connection
 * 82567LM-2 Gigabit Network Connection
 * 82567LF-2 Gigabit Network Connection
 * 82567V-2 Gigabit Network Connection
 * 82567LF-3 Gigabit Network Connection
 * 82567LM-3 Gigabit Network Connection
 * 82567LM-4 Gigabit Network Connection
 * 82577LM Gigabit Network Connection
 * 82577LC Gigabit Network Connection
 * 82578DM Gigabit Network Connection
 * 82578DC Gigabit Network Connection
 * 82579LM Gigabit Network Connection
 * 82579V Gigabit Network Connection
 * Ethernet Connection I217-LM
 * Ethernet Connection I217-V
 * Ethernet Connection I218-V
 * Ethernet Connection I218-LM
 * Ethernet Connection (2) I218-LM
 * Ethernet Connection (2) I218-V
 * Ethernet Connection (3) I218-LM
 * Ethernet Connection (3) I218-V
 */

#include "e1000_api.h"

static s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
static s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
static int  e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
static int  e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
					      u8 *mc_addr_list,
					      u32 mc_addr_count);
static s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
static s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
static s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
static s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
					    bool active);
static s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
					    bool active);
static s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
				   u16 words, u16 *data);
static s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
				    u16 words, u16 *data);
static s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
static s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
static s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
					    u16 *data);
static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
static s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
static s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
static s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
static s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
static s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
static s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
static s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
					   u16 *speed, u16 *duplex);
static s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
static s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
static s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
static s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
static s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
static s32  e1000_led_on_pchlan(struct e1000_hw *hw);
static s32  e1000_led_off_pchlan(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
static s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
static s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
static s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
					  u32 offset, u8 *data);
static s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					  u8 size, u16 *data);
static s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
					  u32 offset, u16 *data);
static s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
						 u32 offset, u8 byte);
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr);

/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
	struct ich8_hsfsts {
		u16 flcdone:1; /* bit 0 Flash Cycle Done */
		u16 flcerr:1; /* bit 1 Flash Cycle Error */
		u16 dael:1; /* bit 2 Direct Access error Log */
		u16 berasesz:2; /* bit 4:3 Sector Erase Size */
		u16 flcinprog:1; /* bit 5 flash cycle in Progress */
		u16 reserved1:2; /* bit 7:6 Reserved */
		u16 reserved2:6; /* bit 13:8 Reserved */
		u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
		u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
	} hsf_status;
	u16 regval;
};
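
/* Illustrative sketch of how this union is typically used (assumes the
 * ICH_FLASH_HSFSTS offset and the E1000_READ/WRITE_FLASH_REG16 accessors
 * used by the flash helpers in this family of files): read HSFSTS through
 * the union, then clear the write-1-to-clear status bits.
 *
 *	union ich8_hws_flash_status hsfsts;
 *
 *	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
 *	if (hsfsts.hsf_status.flcdone) {
 *		hsfsts.hsf_status.flcdone = 1;
 *		hsfsts.hsf_status.flcerr = 1;
 *		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
 *	}
 */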

/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
	struct ich8_hsflctl {
		u16 flcgo:1;   /* 0 Flash Cycle Go */
		u16 flcycle:2;   /* 2:1 Flash Cycle */
		u16 reserved:5;   /* 7:3 Reserved  */
		u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
		u16 flockdn:6;   /* 15:10 Reserved */
	} hsf_ctrl;
	u16 regval;
};
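
/* Illustrative sketch (assumes ICH_FLASH_HSFCTL and ICH_CYCLE_READ as used
 * by the flash read/write helpers in this family of files): program a
 * read cycle of "size" bytes through the union before setting flcgo.
 * Note fldbcount is encoded as the byte count minus one.
 *
 *	union ich8_hws_flash_ctrl hsflctl;
 *
 *	hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
 *	hsflctl.hsf_ctrl.fldbcount = size - 1;
 *	hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
 *	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
 */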

/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
	struct ich8_flracc {
		u32 grra:8; /* 0:7 GbE region Read Access */
		u32 grwa:8; /* 8:15 GbE region Write Access */
		u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
		u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
	} hsf_flregacc;
	u32 regval;
};

/**
 *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 *  @hw: pointer to the HW structure
 *
 *  Test access to the PHY registers by reading the PHY ID registers.  If
 *  the PHY ID is already known (e.g. resume path) compare it with the known
 *  ID, otherwise assume the read PHY ID is correct if it is valid.
 *
 *  Assumes the sw/fw/hw semaphore is already acquired.
 **/
static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
	u16 phy_reg = 0;
	u32 phy_id = 0;
	s32 ret_val = 0;
	u16 retry_count;
	u32 mac_reg = 0;

	for (retry_count = 0; retry_count < 2; retry_count++) {
		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF))
			continue;
		phy_id = (u32)(phy_reg << 16);

		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF)) {
			phy_id = 0;
			continue;
		}
		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
		break;
	}

	if (hw->phy.id) {
		if (hw->phy.id == phy_id)
			goto out;
	} else if (phy_id) {
		hw->phy.id = phy_id;
		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
		goto out;
	}

	/* In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 */
	if (hw->mac.type < e1000_pch_lpt) {
		hw->phy.ops.release(hw);
		ret_val = e1000_set_mdio_slow_mode_hv(hw);
		if (!ret_val)
			ret_val = e1000_get_phy_id(hw);
		hw->phy.ops.acquire(hw);
	}

	if (ret_val)
		return FALSE;
out:
	if (hw->mac.type == e1000_pch_lpt) {
		/* Unforce SMBus mode in PHY */
		hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
		phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
		hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);

		/* Unforce SMBus mode in MAC */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
	}

	return TRUE;
}

/**
 *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 *  @hw: pointer to the HW structure
 *
 *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 *  used to reset the PHY to a quiescent state when necessary.
 **/
static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
{
	u32 mac_reg;

	DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");

	/* Set Phy Config Counter to 50msec */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
	mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
	mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
	E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);

	/* Toggle LANPHYPC Value bit */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL);
	mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
	mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
	E1000_WRITE_FLUSH(hw);
	usec_delay(10);
	mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
	E1000_WRITE_FLUSH(hw);

	if (hw->mac.type < e1000_pch_lpt) {
		msec_delay(50);
	} else {
		u16 count = 20;

		do {
			msec_delay(5);
		} while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
			   E1000_CTRL_EXT_LPCD) && count--);

		msec_delay(30);
	}
}

/**
 *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 *  @hw: pointer to the HW structure
 *
 *  Workarounds/flow necessary for PHY initialization during driver load
 *  and resume paths.
 **/
static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
	u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
	s32 ret_val;

	DEBUGFUNC("e1000_init_phy_workarounds_pchlan");

	/* Gate automatic PHY configuration by hardware on managed and
	 * non-managed 82579 and newer adapters.
	 */
	e1000_gate_hw_phy_config_ich8lan(hw, TRUE);

	/* It is not possible to be certain of the current state of ULP
	 * so forcibly disable it.
	 */
	hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
	e1000_disable_ulp_lpt_lp(hw, TRUE);

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val) {
		DEBUGOUT("Failed to initialize PHY flow\n");
		goto out;
	}

	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
	 * inaccessible and resetting the PHY is not blocked, toggle the
	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
	 */
	switch (hw->mac.type) {
	case e1000_pch_lpt:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* Before toggling LANPHYPC, see if PHY is accessible by
		 * forcing MAC to SMBus mode first.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

		/* Wait 50 milliseconds for MAC to finish any retries
		 * that it might be trying to perform from previous
		 * attempts to acknowledge any phy read requests.
		 */
		msec_delay(50);

		/* fall-through */
	case e1000_pch2lan:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* fall-through */
	case e1000_pchlan:
		if ((hw->mac.type == e1000_pchlan) &&
		    (fwsm & E1000_ICH_FWSM_FW_VALID))
			break;

		if (hw->phy.ops.check_reset_block(hw)) {
			DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
			ret_val = -E1000_ERR_PHY;
			break;
		}

		/* Toggle LANPHYPC Value bit */
		e1000_toggle_lanphypc_pch_lpt(hw);
		if (hw->mac.type >= e1000_pch_lpt) {
			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			/* Toggling LANPHYPC brings the PHY out of SMBus mode
			 * so ensure that the MAC is also out of SMBus mode
			 */
			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			ret_val = -E1000_ERR_PHY;
		}
		break;
	default:
		break;
	}

	hw->phy.ops.release(hw);
	if (!ret_val) {

		/* Check to see if able to reset PHY.  Print error if not */
		if (hw->phy.ops.check_reset_block(hw)) {
			ERROR_REPORT("Reset blocked by ME\n");
			goto out;
		}

		/* Reset the PHY before any access to it.  Doing so, ensures
		 * that the PHY is in a known good state before we read/write
		 * PHY registers.  The generic reset is sufficient here,
		 * because we haven't determined the PHY type yet.
		 */
		ret_val = e1000_phy_hw_reset_generic(hw);
		if (ret_val)
			goto out;

		/* On a successful reset, possibly need to wait for the PHY
		 * to quiesce to an accessible state before returning control
		 * to the calling function.  If the PHY does not quiesce, then
		 * return E1000_BLK_PHY_RESET, as this is the condition that
		 * the PHY is in.
		 */
		ret_val = hw->phy.ops.check_reset_block(hw);
		if (ret_val)
			ERROR_REPORT("ME blocked access to PHY after reset\n");
	}

out:
	/* Ungate automatic PHY configuration on non-managed 82579 */
	if ((hw->mac.type == e1000_pch2lan) &&
	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
		msec_delay(10);
		e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
	}

	return ret_val;
}

/**
 *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers.
 **/
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;

	DEBUGFUNC("e1000_init_phy_params_pchlan");

	phy->addr		= 1;
	phy->reset_delay_us	= 100;

	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
	phy->ops.set_page	= e1000_set_page_igp;
	phy->ops.read_reg	= e1000_read_phy_reg_hv;
	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
	phy->ops.read_reg_page	= e1000_read_phy_reg_page_hv;
	phy->ops.release	= e1000_release_swflag_ich8lan;
	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.write_reg	= e1000_write_phy_reg_hv;
	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
	phy->ops.write_reg_page	= e1000_write_phy_reg_page_hv;
	phy->ops.power_up	= e1000_power_up_phy_copper;
	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;
	phy->autoneg_mask	= AUTONEG_ADVERTISE_SPEED_DEFAULT;

	phy->id = e1000_phy_unknown;

	ret_val = e1000_init_phy_workarounds_pchlan(hw);
	if (ret_val)
		return ret_val;

	if (phy->id == e1000_phy_unknown)
		switch (hw->mac.type) {
		default:
			ret_val = e1000_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
				break;
			/* fall-through */
		case e1000_pch2lan:
		case e1000_pch_lpt:
			/* In case the PHY needs to be in mdio slow mode,
			 * set slow mode and try to get the PHY id again.
			 */
			ret_val = e1000_set_mdio_slow_mode_hv(hw);
			if (ret_val)
				return ret_val;
			ret_val = e1000_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			break;
		}
	phy->type = e1000_get_phy_type_from_id(phy->id);

	switch (phy->type) {
	case e1000_phy_82577:
	case e1000_phy_82579:
	case e1000_phy_i217:
		phy->ops.check_polarity = e1000_check_polarity_82577;
		phy->ops.force_speed_duplex =
			e1000_phy_force_speed_duplex_82577;
		phy->ops.get_cable_length = e1000_get_cable_length_82577;
		phy->ops.get_info = e1000_get_phy_info_82577;
		phy->ops.commit = e1000_phy_sw_reset_generic;
		break;
	case e1000_phy_82578:
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
		phy->ops.get_cable_length = e1000_get_cable_length_m88;
		phy->ops.get_info = e1000_get_phy_info_m88;
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

	return ret_val;
}

/**
 *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers.
 **/
static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 i = 0;

	DEBUGFUNC("e1000_init_phy_params_ich8lan");

	phy->addr		= 1;
	phy->reset_delay_us	= 100;

	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
	phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
	phy->ops.read_reg	= e1000_read_phy_reg_igp;
	phy->ops.release	= e1000_release_swflag_ich8lan;
	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
	phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
	phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
	phy->ops.write_reg	= e1000_write_phy_reg_igp;
	phy->ops.power_up	= e1000_power_up_phy_copper;
	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;

	/* We may need to do this twice - once for IGP and if that fails,
	 * we'll set BM func pointers and try again
	 */
	ret_val = e1000_determine_phy_address(hw);
	if (ret_val) {
		phy->ops.write_reg = e1000_write_phy_reg_bm;
		phy->ops.read_reg  = e1000_read_phy_reg_bm;
		ret_val = e1000_determine_phy_address(hw);
		if (ret_val) {
			DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
			return ret_val;
		}
	}

	phy->id = 0;
	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
	       (i++ < 100)) {
		msec_delay(1);
		ret_val = e1000_get_phy_id(hw);
		if (ret_val)
			return ret_val;
	}

	/* Verify phy id */
	switch (phy->id) {
	case IGP03E1000_E_PHY_ID:
		phy->type = e1000_phy_igp_3;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
		phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
		phy->ops.get_info = e1000_get_phy_info_igp;
		phy->ops.check_polarity = e1000_check_polarity_igp;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
		break;
	case IFE_E_PHY_ID:
	case IFE_PLUS_E_PHY_ID:
	case IFE_C_E_PHY_ID:
		phy->type = e1000_phy_ife;
		phy->autoneg_mask = E1000_ALL_NOT_GIG;
		phy->ops.get_info = e1000_get_phy_info_ife;
		phy->ops.check_polarity = e1000_check_polarity_ife;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
		break;
	case BME1000_E_PHY_ID:
		phy->type = e1000_phy_bm;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg = e1000_read_phy_reg_bm;
		phy->ops.write_reg = e1000_write_phy_reg_bm;
		phy->ops.commit = e1000_phy_sw_reset_generic;
		phy->ops.get_info = e1000_get_phy_info_m88;
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
		break;
	default:
		return -E1000_ERR_PHY;
	}

	return E1000_SUCCESS;
}

/**
 *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific NVM parameters and function
 *  pointers.
 **/
static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 gfpreg, sector_base_addr, sector_end_addr;
	u16 i;

	DEBUGFUNC("e1000_init_nvm_params_ich8lan");

	/* Can't read flash registers if the register set isn't mapped. */
	nvm->type = e1000_nvm_flash_sw;
	if (!hw->flash_address) {
		DEBUGOUT("ERROR: Flash registers not mapped\n");
		return -E1000_ERR_CONFIG;
	}

	gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);

	/* sector_X_addr is a "sector"-aligned address (4096 bytes)
	 * Add 1 to sector_end_addr since this sector is included in
	 * the overall size.
	 */
	sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
	sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;

	/* flash_base_addr is byte-aligned */
	nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;

	/* find total size of the NVM, then cut in half since the total
	 * size represents two separate NVM banks.
	 */
	nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
				<< FLASH_SECTOR_ADDR_SHIFT);
	nvm->flash_bank_size /= 2;
	/* Adjust to word count */
	nvm->flash_bank_size /= sizeof(u16);
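
	/* Illustrative example (hypothetical register value): if GFPREG
	 * reads 0x000A0001, the first NVM sector is 0x1 and the last is
	 * 0xB (0xA + 1), so flash_base_addr = 0x1 << 12 = 0x1000 and
	 * flash_bank_size = ((0xB - 0x1) << 12) / 2 / sizeof(u16)
	 *                 = 40960 / 2 / 2 = 10240 words per bank.
	 */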

	nvm->word_size = E1000_SHADOW_RAM_WORDS;

	/* Clear shadow ram */
	for (i = 0; i < nvm->word_size; i++) {
		dev_spec->shadow_ram[i].modified = FALSE;
		dev_spec->shadow_ram[i].value    = 0xFFFF;
	}

	/* Function Pointers */
	nvm->ops.acquire	= e1000_acquire_nvm_ich8lan;
	nvm->ops.release	= e1000_release_nvm_ich8lan;
	nvm->ops.read		= e1000_read_nvm_ich8lan;
	nvm->ops.update		= e1000_update_nvm_checksum_ich8lan;
	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
	nvm->ops.validate	= e1000_validate_nvm_checksum_ich8lan;
	nvm->ops.write		= e1000_write_nvm_ich8lan;

	return E1000_SUCCESS;
}

/**
 *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific MAC parameters and function
 *  pointers.
 **/
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u16 pci_cfg;

	DEBUGFUNC("e1000_init_mac_params_ich8lan");

	/* Set media type function pointer */
	hw->phy.media_type = e1000_media_type_copper;

	/* Set mta register count */
	mac->mta_reg_count = 32;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
	if (mac->type == e1000_ich8lan)
		mac->rar_entry_count--;
	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = TRUE;
	/* FWSM register */
	mac->has_fwsm = TRUE;
	/* ARC subsystem not supported */
	mac->arc_subsystem_valid = FALSE;
	/* Adaptive IFS supported */
	mac->adaptive_ifs = TRUE;

	/* Function pointers */

	/* bus type/speed/width */
	mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
	/* function id */
	mac->ops.set_lan_id = e1000_set_lan_id_single_port;
	/* reset */
	mac->ops.reset_hw = e1000_reset_hw_ich8lan;
	/* hw initialization */
	mac->ops.init_hw = e1000_init_hw_ich8lan;
	/* link setup */
	mac->ops.setup_link = e1000_setup_link_ich8lan;
	/* physical interface setup */
	mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
	/* check for link */
	mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
	/* link info */
	mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
	/* multicast address update */
	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
	/* clear hardware counters */
	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;

	/* LED and other operations */
	switch (mac->type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
		/* ID LED init */
		mac->ops.id_led_init = e1000_id_led_init_generic;
		/* blink LED */
		mac->ops.blink_led = e1000_blink_led_generic;
		/* setup LED */
		mac->ops.setup_led = e1000_setup_led_generic;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_ich8lan;
		mac->ops.led_off = e1000_led_off_ich8lan;
		break;
	case e1000_pch2lan:
		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch2lan;
		/* fall-through */
	case e1000_pch_lpt:
		/* multicast address update for pch2 */
		mac->ops.update_mc_addr_list =
			e1000_update_mc_addr_list_pch2lan;
		/* fall-through */
	case e1000_pchlan:
		/* save PCH revision_id */
		e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
		hw->revision_id = (u8)(pci_cfg & 0x000F);
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
		/* ID LED init */
		mac->ops.id_led_init = e1000_id_led_init_pchlan;
		/* setup LED */
		mac->ops.setup_led = e1000_setup_led_pchlan;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_pchlan;
		mac->ops.led_off = e1000_led_off_pchlan;
		break;
	default:
		break;
	}

	if (mac->type == e1000_pch_lpt) {
		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch_lpt;
		mac->ops.setup_physical_interface =
			e1000_setup_copper_link_pch_lpt;
		mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt;
	}

	/* Enable PCS Lock-loss workaround for ICH8 */
	if (mac->type == e1000_ich8lan)
		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);

	return E1000_SUCCESS;
}

/**
 *  __e1000_access_emi_reg_locked - Read/write EMI register
 *  @hw: pointer to the HW structure
 *  @address: EMI address to program
 *  @data: pointer to value to read/write from/to the EMI address
 *  @read: boolean flag to indicate read or write
 *
 *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
 **/
static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
					 u16 *data, bool read)
{
	s32 ret_val;

	DEBUGFUNC("__e1000_access_emi_reg_locked");

	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
	if (ret_val)
		return ret_val;

	if (read)
		ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
						      data);
	else
		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
						       *data);

	return ret_val;
}

/**
 *  e1000_read_emi_reg_locked - Read Extended Management Interface register
 *  @hw: pointer to the HW structure
 *  @addr: EMI address to program
 *  @data: pointer to the value read from the EMI address
 *
 *  Assumes the SW/FW/HW Semaphore is already acquired.
 **/
s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
{
	DEBUGFUNC("e1000_read_emi_reg_locked");

	return __e1000_access_emi_reg_locked(hw, addr, data, TRUE);
}

/**
 *  e1000_write_emi_reg_locked - Write Extended Management Interface register
 *  @hw: pointer to the HW structure
 *  @addr: EMI address to program
 *  @data: value to be written to the EMI address
 *
 *  Assumes the SW/FW/HW Semaphore is already acquired.
 **/
s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
{
	DEBUGFUNC("e1000_write_emi_reg_locked");

	return __e1000_access_emi_reg_locked(hw, addr, &data, FALSE);
}

/**
 *  e1000_set_eee_pchlan - Enable/disable EEE support
 *  @hw: pointer to the HW structure
 *
 *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
 *  the link and the EEE capabilities of the link partner.  The LPI Control
 *  register bits will remain set only if/when link is up.
 *
 *  EEE LPI must not be asserted earlier than one second after link is up.
 *  On 82579, EEE LPI should not be enabled until such time otherwise there
 *  can be link issues with some switches.  Other devices can have EEE LPI
 *  enabled immediately upon link up since they have a timer in hardware which
 *  prevents LPI from being asserted too early.
 **/
s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	s32 ret_val;
	u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;

	DEBUGFUNC("e1000_set_eee_pchlan");

	switch (hw->phy.type) {
	case e1000_phy_82579:
		lpa = I82579_EEE_LP_ABILITY;
		pcs_status = I82579_EEE_PCS_STATUS;
		adv_addr = I82579_EEE_ADVERTISEMENT;
		break;
	case e1000_phy_i217:
		lpa = I217_EEE_LP_ABILITY;
		pcs_status = I217_EEE_PCS_STATUS;
		adv_addr = I217_EEE_ADVERTISEMENT;
		break;
	default:
		return E1000_SUCCESS;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
	if (ret_val)
		goto release;

	/* Clear bits that enable EEE in various speeds */
	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;

	/* Enable EEE if not disabled by user */
	if (!dev_spec->eee_disable) {
		/* Save off link partner's EEE ability */
		ret_val = e1000_read_emi_reg_locked(hw, lpa,
						    &dev_spec->eee_lp_ability);
		if (ret_val)
			goto release;

		/* Read EEE advertisement */
		ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
		if (ret_val)
			goto release;

		/* Enable EEE only for speeds in which the link partner is
		 * EEE capable and for which we advertise EEE.
		 */
		if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
			lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;

		if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
			hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
			if (data & NWAY_LPAR_100TX_FD_CAPS)
				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
			else
				/* EEE is not supported in 100Half, so ignore
				 * partner's EEE in 100 ability if full-duplex
				 * is not advertised.
				 */
				dev_spec->eee_lp_ability &=
				    ~I82579_EEE_100_SUPPORTED;
		}
	}

	if (hw->phy.type == e1000_phy_82579) {
		ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
						    &data);
		if (ret_val)
			goto release;

		data &= ~I82579_LPI_100_PLL_SHUT;
		ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
						     data);
	}

	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
	ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
	if (ret_val)
		goto release;

	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
release:
	hw->phy.ops.release(hw);

	return ret_val;
}

/**
 *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 *  @hw:   pointer to the HW structure
 *  @link: link up bool flag
 *
 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 *  preventing further DMA write requests.  Workaround the issue by disabling
 *  the de-assertion of the clock request when in 1Gbps mode.
 *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
 *  speeds in order to avoid Tx hangs.
 **/
static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
{
	u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
	u32 status = E1000_READ_REG(hw, E1000_STATUS);
	s32 ret_val = E1000_SUCCESS;
	u16 reg;

	if (link && (status & E1000_STATUS_SPEED_1000)) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;

		ret_val =
		    e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					       &reg);
		if (ret_val)
			goto release;

		ret_val =
		    e1000_write_kmrn_reg_locked(hw,
						E1000_KMRNCTRLSTA_K1_CONFIG,
						reg &
						~E1000_KMRNCTRLSTA_K1_ENABLE);
		if (ret_val)
			goto release;

		usec_delay(10);

		E1000_WRITE_REG(hw, E1000_FEXTNVM6,
				fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);

		ret_val =
		    e1000_write_kmrn_reg_locked(hw,
						E1000_KMRNCTRLSTA_K1_CONFIG,
						reg);
release:
		hw->phy.ops.release(hw);
	} else {
		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
		fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;

		if (!link || ((status & E1000_STATUS_SPEED_100) &&
			      (status & E1000_STATUS_FD)))
			goto update_fextnvm6;

		ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
		if (ret_val)
			return ret_val;

		/* Clear link status transmit timeout */
		reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;

		if (status & E1000_STATUS_SPEED_100) {
			/* Set inband Tx timeout to 5x10us for 100Half */
			reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Do not extend the K1 entry latency for 100Half */
			fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		} else {
			/* Set inband Tx timeout to 50x10us for 10Full/Half */
			reg |= 50 <<
			       I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Extend the K1 entry latency for 10 Mbps */
			fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		}

		ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
		if (ret_val)
			return ret_val;

update_fextnvm6:
		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
	}

	return ret_val;
}

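/**
 *  e1000_ltr2ns - Convert a Latency Tolerance Reporting value to nanoseconds
 *  @ltr: encoded LTR latency, a 10-bit value plus a 3-bit scale that
 *	  selects multiples of 2^(scale * 5) ns
 *
 *  For example (illustrative), the encoding 0x403 (scale 1, value 3)
 *  represents 3 * 2^5 = 96 ns.
 **/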
static u64 e1000_ltr2ns(u16 ltr)
{
	u32 value, scale;

	/* Determine the latency in nsec based on the LTR value & scale */
	value = ltr & E1000_LTRV_VALUE_MASK;
	scale = (ltr & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT;

	/* Use 64-bit math; the scaled result can exceed 32 bits */
	return (u64)value * (1ULL << (scale * E1000_LTRV_SCALE_FACTOR));
}

/**
 *  e1000_platform_pm_pch_lpt - Set platform power management values
 *  @hw: pointer to the HW structure
 *  @link: bool indicating link status
 *
 *  Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
 *  GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
 *  when link is up (which must not exceed the maximum latency supported
 *  by the platform), otherwise specify there is no LTR requirement.
 *  Unlike true PCIe devices, which set the LTR maximum snoop/no-snoop
 *  latencies in the LTR Extended Capability Structure of the PCIe Extended
 *  Capability register set, on this device LTR is set by writing the
 *  equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
 *  setting the SEND bit to send an Intel On-chip System Fabric sideband
 *  (IOSF-SB) message to the PMC.
 *
 *  Use the LTR value to calculate the Optimized Buffer Flush/Fill (OBFF)
 *  high-water mark.
 **/
static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
{
	u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
		  link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
	u16 lat_enc = 0;	/* latency encoded */
	s32 obff_hwm = 0;

	DEBUGFUNC("e1000_platform_pm_pch_lpt");

	if (link) {
		u16 speed, duplex, scale = 0;
		u16 max_snoop, max_nosnoop;
		u16 max_ltr_enc;	/* max LTR latency encoded */
		s64 lat_ns;		/* latency (ns) */
		s64 value;
		u32 rxa;

		if (!hw->mac.max_frame_size) {
			DEBUGOUT("max_frame_size not set.\n");
			return -E1000_ERR_CONFIG;
		}

		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
		if (!speed) {
			DEBUGOUT("Speed not set.\n");
			return -E1000_ERR_CONFIG;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = E1000_READ_REG(hw, E1000_PBA) & E1000_PBA_RXA_MASK;

		/* Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
		lat_ns = ((s64)rxa * 1024 -
			  (2 * (s64)hw->mac.max_frame_size)) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			lat_ns /= speed;

		value = lat_ns;
		while (value > E1000_LTRV_VALUE_MASK) {
			scale++;
			value = E1000_DIVIDE_ROUND_UP(value, (1 << 5));
		}
		if (scale > E1000_LTRV_SCALE_MAX) {
			DEBUGOUT1("Invalid LTR latency scale %d\n", scale);
			return -E1000_ERR_CONFIG;
		}
		lat_enc = (u16)((scale << E1000_LTRV_SCALE_SHIFT) | value);
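
		/* Illustrative worked example (hypothetical values): with
		 * rxa = 24 (KB), max_frame_size = 1522 and speed = 1000,
		 * lat_ns = (24 * 1024 - 2 * 1522) * 8 * 1000 / 1000
		 *        = 172256.  That exceeds the 10-bit value mask, so
		 * divide (rounding up) by 2^5 twice: 5383, then 169; thus
		 * scale = 2 and lat_enc = (2 << 10) | 169.
		 */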

		/* Determine the maximum latency tolerated by the platform */
		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT, &max_snoop);
		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
		max_ltr_enc = E1000_MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = e1000_ltr2ns(max_ltr_enc);
		}

		if (lat_ns) {
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (s32)(rxa - lat_ns);
		}
		if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) {
			DEBUGOUT1("Invalid high water mark %d\n", obff_hwm);
			return -E1000_ERR_CONFIG;
		}
	}

	/* Set Snoop and No-Snoop latencies the same */
	reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
	E1000_WRITE_REG(hw, E1000_LTRV, reg);

	/* Set OBFF high water mark */
	reg = E1000_READ_REG(hw, E1000_SVT) & ~E1000_SVT_OFF_HWM_MASK;
	reg |= obff_hwm;
	E1000_WRITE_REG(hw, E1000_SVT, reg);

	/* Enable OBFF */
	reg = E1000_READ_REG(hw, E1000_SVCR);
	reg |= E1000_SVCR_OFF_EN;
	/* Always unblock interrupts to the CPU even when the system is
	 * in OBFF mode. This ensures that small round-robin traffic
	 * (like ping) does not get dropped or experience long latency.
	 */
	reg |= E1000_SVCR_OFF_MASKINT;
	E1000_WRITE_REG(hw, E1000_SVCR, reg);

	return E1000_SUCCESS;
}

/**
 *  e1000_set_obff_timer_pch_lpt - Update Optimized Buffer Flush/Fill timer
 *  @hw: pointer to the HW structure
 *  @itr: interrupt throttling rate
 *
 *  Configure OBFF with the updated interrupt rate.
 **/
static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr)
{
	u32 svcr;
	s32 timer;

	DEBUGFUNC("e1000_set_obff_timer_pch_lpt");

	/* Convert ITR value into microseconds for OBFF timer */
	timer = itr & E1000_ITR_MASK;
	timer = (timer * E1000_ITR_MULT) / 1000;

	if ((timer < 0) || (timer > E1000_ITR_MASK)) {
		DEBUGOUT1("Invalid OBFF timer %d\n", timer);
		return -E1000_ERR_CONFIG;
	}

	svcr = E1000_READ_REG(hw, E1000_SVCR);
	svcr &= ~E1000_SVCR_OFF_TIMER_MASK;
	svcr |= timer << E1000_SVCR_OFF_TIMER_SHIFT;
	E1000_WRITE_REG(hw, E1000_SVCR, svcr);

	return E1000_SUCCESS;
}

/**
 *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
 *  @hw: pointer to the HW structure
 *  @to_sx: boolean indicating a system power state transition to Sx
 *
 *  When link is down, configure ULP mode to significantly reduce the power
 *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
 *  ME firmware to start the ULP configuration.  If not on an ME enabled
 *  system, configure the ULP mode by software.
 **/
s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
{
	u32 mac_reg;
	s32 ret_val = E1000_SUCCESS;
	u16 phy_reg;

	if ((hw->mac.type < e1000_pch_lpt) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
		return 0;

	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
		/* Request ME configure ULP mode in the PHY */
		mac_reg = E1000_READ_REG(hw, E1000_H2ME);
		mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
		E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);

		goto out;
	}

	if (!to_sx) {
		int i = 0;

		/* Poll up to 5 seconds for Cable Disconnected indication */
		while (!(E1000_READ_REG(hw, E1000_FEXT) &
			 E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
			/* Bail if link is re-acquired */
			if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
				return -E1000_ERR_PHY;

			if (i++ == 100)
				break;

			msec_delay(50);
		}
		DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
			 (E1000_READ_REG(hw, E1000_FEXT) &
			  E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
			 i * 50);
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	/* Force SMBus mode in PHY */
	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

	/* Force SMBus mode in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

	/* Set Inband ULP Exit, Reset to SMBus mode and
	 * Disable SMBus Release on PERST# in PHY
	 */
	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
		    I218_ULP_CONFIG1_DISABLE_SMB_PERST);
	if (to_sx) {
		if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
			phy_reg |= I218_ULP_CONFIG1_WOL_HOST;

		phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
	} else {
		phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
	}
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Set Disable SMBus Release on PERST# in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
	mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);

	/* Commit ULP changes in PHY by starting auto ULP configuration */
	phy_reg |= I218_ULP_CONFIG1_START;
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
release:
	hw->phy.ops.release(hw);
out:
	if (ret_val)
		DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
	else
		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;

	return ret_val;
}

/**
 *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
 *  @hw: pointer to the HW structure
 *  @force: boolean indicating whether or not to force disabling ULP
 *
 *  Un-configure ULP mode when link is up, the system is transitioned from
 *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
 *  system, poll for an indication from ME that ULP has been un-configured.
 *  If not on an ME enabled system, un-configure the ULP mode by software.
 *
 *  During nominal operation, this function is called when link is acquired
 *  to disable ULP mode (force=FALSE); otherwise, for example when unloading
 *  the driver or during Sx->S0 transitions, this is called with force=TRUE
 *  to forcibly disable ULP.
 **/
s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
{
	s32 ret_val = E1000_SUCCESS;
	u32 mac_reg;
	u16 phy_reg;
	int i = 0;

	if ((hw->mac.type < e1000_pch_lpt) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
		return 0;

	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
		if (force) {
			/* Request ME un-configure ULP mode in the PHY */
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ULP;
			mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
		}

		/* Poll up to 100msec for ME to clear ULP_CFG_DONE */
		while (E1000_READ_REG(hw, E1000_FWSM) &
		       E1000_FWSM_ULP_CFG_DONE) {
			if (i++ == 10) {
				ret_val = -E1000_ERR_PHY;
				goto out;
			}

			msec_delay(10);
		}
		DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);

		if (force) {
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
		} else {
			/* Clear H2ME.ULP after ME ULP configuration */
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ULP;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
		}

		goto out;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	if (force)
		/* Toggle LANPHYPC Value bit */
		e1000_toggle_lanphypc_pch_lpt(hw);

	/* Unforce SMBus mode in PHY */
	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
	if (ret_val) {
		/* The MAC might be in PCIe mode, so temporarily force to
		 * SMBus mode in order to access the PHY.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

		msec_delay(50);

		ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
						       &phy_reg);
		if (ret_val)
			goto release;
	}
	phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

	/* Unforce SMBus mode in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

	/* When ULP mode was previously entered, K1 was disabled by the
	 * hardware.  Re-Enable K1 in the PHY when exiting ULP.
	 */
	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= HV_PM_CTRL_K1_ENABLE;
	e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);

	/* Clear ULP enabled configuration */
	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg &= ~(I218_ULP_CONFIG1_IND |
		     I218_ULP_CONFIG1_STICKY_ULP |
		     I218_ULP_CONFIG1_RESET_TO_SMBUS |
		     I218_ULP_CONFIG1_WOL_HOST |
		     I218_ULP_CONFIG1_INBAND_EXIT |
		     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Commit ULP changes by starting auto ULP configuration */
	phy_reg |= I218_ULP_CONFIG1_START;
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Clear Disable SMBus Release on PERST# in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
	mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);

release:
	hw->phy.ops.release(hw);
	if (force) {
		hw->phy.ops.reset(hw);
		msec_delay(50);
	}
out:
	if (ret_val)
		DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
	else
		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;

	return ret_val;
}

/**
 *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks to see if the link status of the hardware has changed.  If a
 *  change in link status has been detected, then we read the PHY registers
 *  to get the current speed/duplex if link exists.
 **/
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;
	u16 phy_reg;

	DEBUGFUNC("e1000_check_for_copper_link_ich8lan");

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status)
		return E1000_SUCCESS;

	/* First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		return ret_val;

	if (hw->mac.type == e1000_pchlan) {
		ret_val = e1000_k1_gig_workaround_hv(hw, link);
		if (ret_val)
			return ret_val;
	}

	/* When connected at 10Mbps half-duplex, some parts are excessively
	 * aggressive resulting in many collisions. To avoid this, increase
	 * the IPG and reduce Rx latency in the PHY.
	 */
	if (((hw->mac.type == e1000_pch2lan) ||
	     (hw->mac.type == e1000_pch_lpt)) && link) {
		u32 reg;
		reg = E1000_READ_REG(hw, E1000_STATUS);
		if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
			u16 emi_addr;

			reg = E1000_READ_REG(hw, E1000_TIPG);
			reg &= ~E1000_TIPG_IPGT_MASK;
			reg |= 0xFF;
			E1000_WRITE_REG(hw, E1000_TIPG, reg);

			/* Reduce Rx latency in analog PHY */
			ret_val = hw->phy.ops.acquire(hw);
			if (ret_val)
				return ret_val;

			if (hw->mac.type == e1000_pch2lan)
				emi_addr = I82579_RX_CONFIG;
			else
				emi_addr = I217_RX_CONFIG;
			ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0);

			hw->phy.ops.release(hw);

			if (ret_val)
				return ret_val;
		}
	}

	/* Work-around I218 hang issue */
	if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
		if (ret_val)
			return ret_val;
	}
	if (hw->mac.type == e1000_pch_lpt) {
		/* Set platform power management values for
		 * Latency Tolerance Reporting (LTR)
		 * Optimized Buffer Flush/Fill (OBFF)
		 */
		ret_val = e1000_platform_pm_pch_lpt(hw, link);
		if (ret_val)
			return ret_val;
	}

	/* Clear link partner's EEE ability */
	hw->dev_spec.ich8lan.eee_lp_ability = 0;

	if (!link)
		return E1000_SUCCESS; /* No link detected */

	mac->get_link_status = FALSE;

	switch (hw->mac.type) {
	case e1000_pch2lan:
		ret_val = e1000_k1_workaround_lv(hw);
		if (ret_val)
			return ret_val;
		/* fall-through */
	case e1000_pchlan:
		if (hw->phy.type == e1000_phy_82578) {
			ret_val = e1000_link_stall_workaround_hv(hw);
			if (ret_val)
				return ret_val;
		}

		/* Workaround for PCHx parts in half-duplex:
		 * Set the number of preambles removed from the packet
		 * when it is passed from the PHY to the MAC to prevent
		 * the MAC from misinterpreting the packet type.
		 */
		hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;

		if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
		    E1000_STATUS_FD)
			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);

		hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
		break;
	default:
		break;
	}

	/* Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	e1000_check_downshift_generic(hw);

	/* Enable/Disable EEE after link up */
	if (hw->phy.type > e1000_phy_82579) {
		ret_val = e1000_set_eee_pchlan(hw);
		if (ret_val)
			return ret_val;
	}

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg)
		return -E1000_ERR_CONFIG;

	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
	 */
	mac->ops.config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = e1000_config_fc_after_link_up_generic(hw);
	if (ret_val)
		DEBUGOUT("Error configuring flow control\n");

	return ret_val;
}
1618 
1619 /**
1620  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1621  *  @hw: pointer to the HW structure
1622  *
1623  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1624  **/
1625 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1626 {
1627 	DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1628 
1629 	hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1630 	hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1631 	switch (hw->mac.type) {
1632 	case e1000_ich8lan:
1633 	case e1000_ich9lan:
1634 	case e1000_ich10lan:
1635 		hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1636 		break;
1637 	case e1000_pchlan:
1638 	case e1000_pch2lan:
1639 	case e1000_pch_lpt:
1640 		hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1641 		break;
1642 	default:
1643 		break;
1644 	}
1645 }
1646 
1647 /**
1648  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1649  *  @hw: pointer to the HW structure
1650  *
1651  *  Acquires the mutex for performing NVM operations.
1652  **/
1653 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1654 {
1655 	DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1656 	return E1000_SUCCESS;
1657 }
1658 
1659 /**
1660  *  e1000_release_nvm_ich8lan - Release NVM mutex
1661  *  @hw: pointer to the HW structure
1662  *
1663  *  Releases the mutex used while performing NVM operations.
1664  **/
1665 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1666 {
1667 	DEBUGFUNC("e1000_release_nvm_ich8lan");
1668 	return;
1669 }
1670 
1671 /**
1672  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1673  *  @hw: pointer to the HW structure
1674  *
1675  *  Acquires the software control flag for performing PHY and select
1676  *  MAC CSR accesses.
1677  **/
1678 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1679 {
1680 	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1681 	s32 ret_val = E1000_SUCCESS;
1682 
1683 	DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1684 
1685 	while (timeout) {
1686 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1687 		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1688 			break;
1689 
1690 		msec_delay_irq(1);
1691 		timeout--;
1692 	}
1693 
1694 	if (!timeout) {
1695 		DEBUGOUT("SW has already locked the resource.\n");
1696 		ret_val = -E1000_ERR_CONFIG;
1697 		goto out;
1698 	}
1699 
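	/* The flag reads clear; set it, then poll until the bit reads
	 * back set, since FW or HW may still refuse to grant it (see the
	 * failure path below).
	 */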
1700 	timeout = SW_FLAG_TIMEOUT;
1701 
1702 	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1703 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1704 
1705 	while (timeout) {
1706 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1707 		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1708 			break;
1709 
1710 		msec_delay_irq(1);
1711 		timeout--;
1712 	}
1713 
1714 	if (!timeout) {
1715 		DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x\n",
1716 			  E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1717 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1718 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1719 		ret_val = -E1000_ERR_CONFIG;
1720 		goto out;
1721 	}
1722 
1723 out:
1724 	return ret_val;
1725 }
1726 
1727 /**
1728  *  e1000_release_swflag_ich8lan - Release software control flag
1729  *  @hw: pointer to the HW structure
1730  *
1731  *  Releases the software control flag for performing PHY and select
1732  *  MAC CSR accesses.
1733  **/
1734 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1735 {
1736 	u32 extcnf_ctrl;
1737 
1738 	DEBUGFUNC("e1000_release_swflag_ich8lan");
1739 
1740 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1741 
1742 	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1743 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1744 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1745 	} else {
1746 		DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1747 	}
1748 	return;
1749 }
1750 
1751 /**
1752  *  e1000_check_mng_mode_ich8lan - Checks management mode
1753  *  @hw: pointer to the HW structure
1754  *
1755  *  This checks if the adapter has any manageability enabled.
1756  *  This is a function pointer entry point only called by read/write
1757  *  routines for the PHY and NVM parts.
1758  **/
1759 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1760 {
1761 	u32 fwsm;
1762 
1763 	DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1764 
1765 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
1766 
1767 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1768 	       ((fwsm & E1000_FWSM_MODE_MASK) ==
1769 		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1770 }
1771 
1772 /**
1773  *  e1000_check_mng_mode_pchlan - Checks management mode
1774  *  @hw: pointer to the HW structure
1775  *
1776  *  This checks if the adapter has iAMT enabled.
1777  *  This is a function pointer entry point only called by read/write
1778  *  routines for the PHY and NVM parts.
1779  **/
1780 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1781 {
1782 	u32 fwsm;
1783 
1784 	DEBUGFUNC("e1000_check_mng_mode_pchlan");
1785 
1786 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
1787 
1788 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1789 	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1790 }
1791 
1792 /**
1793  *  e1000_rar_set_pch2lan - Set receive address register
1794  *  @hw: pointer to the HW structure
1795  *  @addr: pointer to the receive address
1796  *  @index: receive address array register
1797  *
1798  *  Sets the receive address array register at index to the address passed
1799  *  in by addr.  For 82579, RAR[0] is the base address register that is to
1800  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1801  *  Use SHRA[0-3] in place of those reserved for ME.
1802  **/
1803 static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1804 {
1805 	u32 rar_low, rar_high;
1806 
1807 	DEBUGFUNC("e1000_rar_set_pch2lan");
1808 
1809 	/* HW expects these in little endian so we reverse the byte order
1810 	 * from network order (big endian) to little endian
1811 	 */
1812 	rar_low = ((u32) addr[0] |
1813 		   ((u32) addr[1] << 8) |
1814 		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1815 
1816 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1817 
1818 	/* If MAC address zero, no need to set the AV bit */
1819 	if (rar_low || rar_high)
1820 		rar_high |= E1000_RAH_AV;
1821 
1822 	if (index == 0) {
1823 		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1824 		E1000_WRITE_FLUSH(hw);
1825 		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1826 		E1000_WRITE_FLUSH(hw);
1827 		return E1000_SUCCESS;
1828 	}
1829 
1830 	/* RAR[1-6] are owned by manageability.  Skip those and program the
1831 	 * next address into the SHRA register array.
1832 	 */
1833 	if (index < (u32) (hw->mac.rar_entry_count)) {
1834 		s32 ret_val;
1835 
1836 		ret_val = e1000_acquire_swflag_ich8lan(hw);
1837 		if (ret_val)
1838 			goto out;
1839 
1840 		E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
1841 		E1000_WRITE_FLUSH(hw);
1842 		E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
1843 		E1000_WRITE_FLUSH(hw);
1844 
1845 		e1000_release_swflag_ich8lan(hw);
1846 
1847 		/* verify the register updates */
1848 		if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
1849 		    (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
1850 			return E1000_SUCCESS;
1851 
1852 		DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1853 			 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
1854 	}
1855 
1856 out:
1857 	DEBUGOUT1("Failed to write receive address at index %d\n", index);
1858 	return -E1000_ERR_CONFIG;
1859 }
1860 
1861 /**
1862  *  e1000_rar_set_pch_lpt - Set receive address registers
1863  *  @hw: pointer to the HW structure
1864  *  @addr: pointer to the receive address
1865  *  @index: receive address array register
1866  *
1867  *  Sets the receive address register array at index to the address passed
1868  *  in by addr. For LPT, RAR[0] is the base address register that is to
1869  *  contain the MAC address. SHRA[0-10] are the shared receive address
1870  *  registers that are shared between the Host and manageability engine (ME).
1871  **/
1872 static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1873 {
1874 	u32 rar_low, rar_high;
1875 	u32 wlock_mac;
1876 
1877 	DEBUGFUNC("e1000_rar_set_pch_lpt");
1878 
1879 	/* HW expects these in little endian so we reverse the byte order
1880 	 * from network order (big endian) to little endian
1881 	 */
1882 	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
1883 		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1884 
1885 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1886 
1887 	/* If MAC address zero, no need to set the AV bit */
1888 	if (rar_low || rar_high)
1889 		rar_high |= E1000_RAH_AV;
1890 
1891 	if (index == 0) {
1892 		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1893 		E1000_WRITE_FLUSH(hw);
1894 		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1895 		E1000_WRITE_FLUSH(hw);
1896 		return E1000_SUCCESS;
1897 	}
1898 
1899 	/* The manageability engine (ME) can lock certain SHRAR registers that
1900 	 * it is using - those registers are unavailable for use.
1901 	 */
1902 	if (index < hw->mac.rar_entry_count) {
1903 		wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
1904 			    E1000_FWSM_WLOCK_MAC_MASK;
1905 		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1906 
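		/* As the checks below imply, WLOCK_MAC == 0 leaves all SHRA
		 * registers writable, 1 locks them all, and a value N > 1
		 * permits writes only up to RAR index N (SHRA[N - 1]).
		 */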
1907 		/* Check if all SHRAR registers are locked */
1908 		if (wlock_mac == 1)
1909 			goto out;
1910 
1911 		if ((wlock_mac == 0) || (index <= wlock_mac)) {
1912 			s32 ret_val;
1913 
1914 			ret_val = e1000_acquire_swflag_ich8lan(hw);
1915 
1916 			if (ret_val)
1917 				goto out;
1918 
1919 			E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
1920 					rar_low);
1921 			E1000_WRITE_FLUSH(hw);
1922 			E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
1923 					rar_high);
1924 			E1000_WRITE_FLUSH(hw);
1925 
1926 			e1000_release_swflag_ich8lan(hw);
1927 
1928 			/* verify the register updates */
1929 			if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
1930 			    (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
1931 				return E1000_SUCCESS;
1932 		}
1933 	}
1934 
1935 out:
1936 	DEBUGOUT1("Failed to write receive address at index %d\n", index);
1937 	return -E1000_ERR_CONFIG;
1938 }
1939 
1940 /**
1941  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
1942  *  @hw: pointer to the HW structure
1943  *  @mc_addr_list: array of multicast addresses to program
1944  *  @mc_addr_count: number of multicast addresses to program
1945  *
1946  *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
1947  *  The caller must have a packed mc_addr_list of multicast addresses.
1948  **/
1949 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
1950 					      u8 *mc_addr_list,
1951 					      u32 mc_addr_count)
1952 {
1953 	u16 phy_reg = 0;
1954 	int i;
1955 	s32 ret_val;
1956 
1957 	DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
1958 
1959 	e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
1960 
1961 	ret_val = hw->phy.ops.acquire(hw);
1962 	if (ret_val)
1963 		return;
1964 
1965 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1966 	if (ret_val)
1967 		goto release;
1968 
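	/* Each 32-bit MTA shadow word is written as two 16-bit PHY wakeup
	 * registers: the low half at BM_MTA(i), the high half at BM_MTA(i) + 1.
	 */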
1969 	for (i = 0; i < hw->mac.mta_reg_count; i++) {
1970 		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
1971 					   (u16)(hw->mac.mta_shadow[i] &
1972 						 0xFFFF));
1973 		hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
1974 					   (u16)((hw->mac.mta_shadow[i] >> 16) &
1975 						 0xFFFF));
1976 	}
1977 
1978 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1979 
1980 release:
1981 	hw->phy.ops.release(hw);
1982 }
1983 
1984 /**
1985  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1986  *  @hw: pointer to the HW structure
1987  *
1988  *  Checks if firmware is blocking the reset of the PHY.
1989  *  This is a function pointer entry point only called by
1990  *  reset routines.
1991  **/
1992 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1993 {
1994 	u32 fwsm;
1995 	bool blocked = FALSE;
1996 	int i = 0;
1997 
1998 	DEBUGFUNC("e1000_check_reset_block_ich8lan");
1999 
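	/* PHY reset is blocked while RSPCIPHY reads clear; retry for up
	 * to ten 10 msec intervals before reporting the reset as blocked.
	 */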
2000 	do {
2001 		fwsm = E1000_READ_REG(hw, E1000_FWSM);
2002 		if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
2003 			blocked = TRUE;
2004 			msec_delay(10);
2005 			continue;
2006 		}
2007 		blocked = FALSE;
2008 	} while (blocked && (i++ < 10));
2009 	return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
2010 }
2011 
2012 /**
2013  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2014  *  @hw: pointer to the HW structure
2015  *
2016  *  Assumes semaphore already acquired.
2017  *
2018  **/
2019 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2020 {
2021 	u16 phy_data;
2022 	u32 strap = E1000_READ_REG(hw, E1000_STRAP);
2023 	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2024 		E1000_STRAP_SMT_FREQ_SHIFT;
2025 	s32 ret_val;
2026 
2027 	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2028 
2029 	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2030 	if (ret_val)
2031 		return ret_val;
2032 
2033 	phy_data &= ~HV_SMB_ADDR_MASK;
2034 	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2035 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2036 
2037 	if (hw->phy.type == e1000_phy_i217) {
2038 		/* Restore SMBus frequency */
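		/* The strap field treats 0 as unsupported; otherwise
		 * test-and-decrement it, then scatter bits 0 and 1 of the
		 * result into the register's FREQ_LOW/FREQ_HIGH positions.
		 */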
2039 		if (freq--) {
2040 			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2041 			phy_data |= (freq & (1 << 0)) <<
2042 				HV_SMB_ADDR_FREQ_LOW_SHIFT;
2043 			phy_data |= (freq & (1 << 1)) <<
2044 				(HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2045 		} else {
2046 			DEBUGOUT("Unsupported SMB frequency in PHY\n");
2047 		}
2048 	}
2049 
2050 	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2051 }
2052 
2053 /**
2054  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2055  *  @hw:   pointer to the HW structure
2056  *
2057  *  SW should configure the LCD from the NVM extended configuration region
2058  *  as a workaround for certain parts.
2059  **/
2060 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2061 {
2062 	struct e1000_phy_info *phy = &hw->phy;
2063 	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2064 	s32 ret_val = E1000_SUCCESS;
2065 	u16 word_addr, reg_data, reg_addr, phy_page = 0;
2066 
2067 	DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2068 
2069 	/* Initialize the PHY from the NVM on ICH platforms.  This
2070 	 * is needed due to an issue where the NVM configuration is
2071 	 * not properly autoloaded after power transitions.
2072 	 * Therefore, after each PHY reset, we will load the
2073 	 * configuration data out of the NVM manually.
2074 	 */
2075 	switch (hw->mac.type) {
2076 	case e1000_ich8lan:
2077 		if (phy->type != e1000_phy_igp_3)
2078 			return ret_val;
2079 
2080 		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2081 		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2082 			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2083 			break;
2084 		}
2085 		/* Fall-thru */
2086 	case e1000_pchlan:
2087 	case e1000_pch2lan:
2088 	case e1000_pch_lpt:
2089 		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2090 		break;
2091 	default:
2092 		return ret_val;
2093 	}
2094 
2095 	ret_val = hw->phy.ops.acquire(hw);
2096 	if (ret_val)
2097 		return ret_val;
2098 
2099 	data = E1000_READ_REG(hw, E1000_FEXTNVM);
2100 	if (!(data & sw_cfg_mask))
2101 		goto release;
2102 
2103 	/* Make sure HW does not configure LCD from PHY
2104 	 * extended configuration before SW configuration
2105 	 */
2106 	data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2107 	if ((hw->mac.type < e1000_pch2lan) &&
2108 	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2109 		goto release;
2110 
2111 	cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2112 	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2113 	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2114 	if (!cnf_size)
2115 		goto release;
2116 
2117 	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2118 	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2119 
2120 	if (((hw->mac.type == e1000_pchlan) &&
2121 	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2122 	    (hw->mac.type > e1000_pchlan)) {
2123 		/* HW configures the SMBus address and LEDs when the
2124 		 * OEM and LCD Write Enable bits are set in the NVM.
2125 		 * When both NVM bits are cleared, SW will configure
2126 		 * them instead.
2127 		 */
2128 		ret_val = e1000_write_smbus_addr(hw);
2129 		if (ret_val)
2130 			goto release;
2131 
2132 		data = E1000_READ_REG(hw, E1000_LEDCTL);
2133 		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2134 							(u16)data);
2135 		if (ret_val)
2136 			goto release;
2137 	}
2138 
2139 	/* Configure LCD from extended configuration region. */
2140 
2141 	/* cnf_base_addr is in DWORDs; convert it to a word address */
2142 	word_addr = (u16)(cnf_base_addr << 1);
2143 
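	/* Each extended configuration entry is a pair of NVM words: a
	 * data word followed by the PHY register address to write it to.
	 */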
2144 	for (i = 0; i < cnf_size; i++) {
2145 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2146 					   &reg_data);
2147 		if (ret_val)
2148 			goto release;
2149 
2150 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2151 					   1, &reg_addr);
2152 		if (ret_val)
2153 			goto release;
2154 
2155 		/* Save off the PHY page for future writes. */
2156 		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2157 			phy_page = reg_data;
2158 			continue;
2159 		}
2160 
2161 		reg_addr &= PHY_REG_MASK;
2162 		reg_addr |= phy_page;
2163 
2164 		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2165 						    reg_data);
2166 		if (ret_val)
2167 			goto release;
2168 	}
2169 
2170 release:
2171 	hw->phy.ops.release(hw);
2172 	return ret_val;
2173 }
2174 
2175 /**
2176  *  e1000_k1_gig_workaround_hv - K1 Si workaround
2177  *  @hw:   pointer to the HW structure
2178  *  @link: link up bool flag
2179  *
2180  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2181  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
2182  *  If link is down, the function will restore the default K1 setting located
2183  *  in the NVM.
2184  **/
2185 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2186 {
2187 	s32 ret_val = E1000_SUCCESS;
2188 	u16 status_reg = 0;
2189 	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2190 
2191 	DEBUGFUNC("e1000_k1_gig_workaround_hv");
2192 
2193 	if (hw->mac.type != e1000_pchlan)
2194 		return E1000_SUCCESS;
2195 
2196 	/* Wrap the whole flow with the sw flag */
2197 	ret_val = hw->phy.ops.acquire(hw);
2198 	if (ret_val)
2199 		return ret_val;
2200 
2201 	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2202 	if (link) {
2203 		if (hw->phy.type == e1000_phy_82578) {
2204 			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2205 							      &status_reg);
2206 			if (ret_val)
2207 				goto release;
2208 
2209 			status_reg &= (BM_CS_STATUS_LINK_UP |
2210 				       BM_CS_STATUS_RESOLVED |
2211 				       BM_CS_STATUS_SPEED_MASK);
2212 
2213 			if (status_reg == (BM_CS_STATUS_LINK_UP |
2214 					   BM_CS_STATUS_RESOLVED |
2215 					   BM_CS_STATUS_SPEED_1000))
2216 				k1_enable = FALSE;
2217 		}
2218 
2219 		if (hw->phy.type == e1000_phy_82577) {
2220 			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2221 							      &status_reg);
2222 			if (ret_val)
2223 				goto release;
2224 
2225 			status_reg &= (HV_M_STATUS_LINK_UP |
2226 				       HV_M_STATUS_AUTONEG_COMPLETE |
2227 				       HV_M_STATUS_SPEED_MASK);
2228 
2229 			if (status_reg == (HV_M_STATUS_LINK_UP |
2230 					   HV_M_STATUS_AUTONEG_COMPLETE |
2231 					   HV_M_STATUS_SPEED_1000))
2232 				k1_enable = FALSE;
2233 		}
2234 
2235 		/* Link stall fix for link up */
2236 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2237 						       0x0100);
2238 		if (ret_val)
2239 			goto release;
2240 
2241 	} else {
2242 		/* Link stall fix for link down */
2243 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2244 						       0x4100);
2245 		if (ret_val)
2246 			goto release;
2247 	}
2248 
2249 	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2250 
2251 release:
2252 	hw->phy.ops.release(hw);
2253 
2254 	return ret_val;
2255 }
2256 
2257 /**
2258  *  e1000_configure_k1_ich8lan - Configure K1 power state
2259  *  @hw: pointer to the HW structure
2260  *  @k1_enable: K1 state to configure
2261  *
2262  *  Configure the K1 power state based on the provided parameter.
2263  *  Assumes semaphore already acquired.
2264  *
2265  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2266  **/
2267 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2268 {
2269 	s32 ret_val;
2270 	u32 ctrl_reg = 0;
2271 	u32 ctrl_ext = 0;
2272 	u32 reg = 0;
2273 	u16 kmrn_reg = 0;
2274 
2275 	DEBUGFUNC("e1000_configure_k1_ich8lan");
2276 
2277 	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2278 					     &kmrn_reg);
2279 	if (ret_val)
2280 		return ret_val;
2281 
2282 	if (k1_enable)
2283 		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2284 	else
2285 		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2286 
2287 	ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2288 					      kmrn_reg);
2289 	if (ret_val)
2290 		return ret_val;
2291 
2292 	usec_delay(20);
2293 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2294 	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2295 
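	/* Pulse a forced MAC speed with the speed-bypass bit set, then
	 * restore CTRL/CTRL_EXT; this appears to be what latches the new
	 * K1 setting into the hardware.
	 */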
2296 	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2297 	reg |= E1000_CTRL_FRCSPD;
2298 	E1000_WRITE_REG(hw, E1000_CTRL, reg);
2299 
2300 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2301 	E1000_WRITE_FLUSH(hw);
2302 	usec_delay(20);
2303 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2304 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2305 	E1000_WRITE_FLUSH(hw);
2306 	usec_delay(20);
2307 
2308 	return E1000_SUCCESS;
2309 }
2310 
2311 /**
2312  *  e1000_oem_bits_config_ich8lan - SW-based OEM bits configuration
2313  *  @hw:       pointer to the HW structure
2314  *  @d0_state: boolean if entering d0 or d3 device state
2315  *
2316  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2317  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2318  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
2319  **/
2320 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2321 {
2322 	s32 ret_val = 0;
2323 	u32 mac_reg;
2324 	u16 oem_reg;
2325 
2326 	DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2327 
2328 	if (hw->mac.type < e1000_pchlan)
2329 		return ret_val;
2330 
2331 	ret_val = hw->phy.ops.acquire(hw);
2332 	if (ret_val)
2333 		return ret_val;
2334 
2335 	if (hw->mac.type == e1000_pchlan) {
2336 		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2337 		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2338 			goto release;
2339 	}
2340 
2341 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2342 	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2343 		goto release;
2344 
2345 	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2346 
2347 	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2348 	if (ret_val)
2349 		goto release;
2350 
2351 	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2352 
2353 	if (d0_state) {
2354 		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2355 			oem_reg |= HV_OEM_BITS_GBE_DIS;
2356 
2357 		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2358 			oem_reg |= HV_OEM_BITS_LPLU;
2359 	} else {
2360 		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2361 		    E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2362 			oem_reg |= HV_OEM_BITS_GBE_DIS;
2363 
2364 		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2365 		    E1000_PHY_CTRL_NOND0A_LPLU))
2366 			oem_reg |= HV_OEM_BITS_LPLU;
2367 	}
2368 
2369 	/* Set Restart auto-neg to activate the bits */
2370 	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2371 	    !hw->phy.ops.check_reset_block(hw))
2372 		oem_reg |= HV_OEM_BITS_RESTART_AN;
2373 
2374 	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2375 
2376 release:
2377 	hw->phy.ops.release(hw);
2378 
2379 	return ret_val;
2380 }
2381 
2382 
2383 /**
2384  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2385  *  @hw:   pointer to the HW structure
2386  **/
2387 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2388 {
2389 	s32 ret_val;
2390 	u16 data;
2391 
2392 	DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2393 
2394 	ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2395 	if (ret_val)
2396 		return ret_val;
2397 
2398 	data |= HV_KMRN_MDIO_SLOW;
2399 
2400 	ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2401 
2402 	return ret_val;
2403 }
2404 
2405 /**
2406  *  e1000_hv_phy_workarounds_ich8lan - A series of PHY workarounds to be
2407  *  done after every PHY reset.
 *  @hw: pointer to the HW structure
2408  **/
2409 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2410 {
2411 	s32 ret_val = E1000_SUCCESS;
2412 	u16 phy_data;
2413 
2414 	DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2415 
2416 	if (hw->mac.type != e1000_pchlan)
2417 		return E1000_SUCCESS;
2418 
2419 	/* Set MDIO slow mode before any other MDIO access */
2420 	if (hw->phy.type == e1000_phy_82577) {
2421 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
2422 		if (ret_val)
2423 			return ret_val;
2424 	}
2425 
2426 	if (((hw->phy.type == e1000_phy_82577) &&
2427 	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2428 	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2429 		/* Disable generation of early preamble */
2430 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2431 		if (ret_val)
2432 			return ret_val;
2433 
2434 		/* Preamble tuning for SSC */
2435 		ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2436 						0xA204);
2437 		if (ret_val)
2438 			return ret_val;
2439 	}
2440 
2441 	if (hw->phy.type == e1000_phy_82578) {
2442 		/* Return registers to default by doing a soft reset then
2443 		 * writing 0x3140 to the control register.
2444 		 */
2445 		if (hw->phy.revision < 2) {
2446 			e1000_phy_sw_reset_generic(hw);
2447 			ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2448 							0x3140);
2449 		}
2450 	}
2451 
2452 	/* Select page 0 */
2453 	ret_val = hw->phy.ops.acquire(hw);
2454 	if (ret_val)
2455 		return ret_val;
2456 
2457 	hw->phy.addr = 1;
2458 	ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2459 	hw->phy.ops.release(hw);
2460 	if (ret_val)
2461 		return ret_val;
2462 
2463 	/* Configure the K1 Si workaround during phy reset assuming there is
2464 	 * link so that it disables K1 if link is at 1Gbps.
2465 	 */
2466 	ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
2467 	if (ret_val)
2468 		return ret_val;
2469 
2470 	/* Workaround for link disconnects on a busy hub in half duplex */
2471 	ret_val = hw->phy.ops.acquire(hw);
2472 	if (ret_val)
2473 		return ret_val;
2474 	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2475 	if (ret_val)
2476 		goto release;
2477 	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2478 					       phy_data & 0x00FF);
2479 	if (ret_val)
2480 		goto release;
2481 
2482 	/* set MSE higher to enable link to stay up when noise is high */
2483 	ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2484 release:
2485 	hw->phy.ops.release(hw);
2486 
2487 	return ret_val;
2488 }
2489 
2490 /**
2491  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2492  *  @hw:   pointer to the HW structure
2493  **/
2494 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2495 {
2496 	u32 mac_reg;
2497 	u16 i, phy_reg = 0;
2498 	s32 ret_val;
2499 
2500 	DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2501 
2502 	ret_val = hw->phy.ops.acquire(hw);
2503 	if (ret_val)
2504 		return;
2505 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2506 	if (ret_val)
2507 		goto release;
2508 
2509 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2510 	for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2511 		mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2512 		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2513 					   (u16)(mac_reg & 0xFFFF));
2514 		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2515 					   (u16)((mac_reg >> 16) & 0xFFFF));
2516 
2517 		mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2518 		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2519 					   (u16)(mac_reg & 0xFFFF));
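		/* RAH.AV (bit 31) lands in bit 15 of BM_RAR_CTRL */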
2520 		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2521 					   (u16)((mac_reg & E1000_RAH_AV)
2522 						 >> 16));
2523 	}
2524 
2525 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2526 
2527 release:
2528 	hw->phy.ops.release(hw);
2529 }
2530 
2531 static u32 e1000_calc_rx_da_crc(u8 mac[])
2532 {
2533 	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
2534 	u32 i, j, mask, crc;
2535 
2536 	DEBUGFUNC("e1000_calc_rx_da_crc");
2537 
2538 	crc = 0xffffffff;
2539 	for (i = 0; i < 6; i++) {
2540 		crc = crc ^ mac[i];
2541 		for (j = 8; j > 0; j--) {
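			/* mask is all ones when the CRC LSB is set, else 0 */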
2542 			mask = (crc & 1) * (-1);
2543 			crc = (crc >> 1) ^ (poly & mask);
2544 		}
2545 	}
2546 	return ~crc;
2547 }
2548 
2549 /**
2550  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2551  *  with 82579 PHY
2552  *  @hw: pointer to the HW structure
2553  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2554  **/
2555 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2556 {
2557 	s32 ret_val = E1000_SUCCESS;
2558 	u16 phy_reg, data;
2559 	u32 mac_reg;
2560 	u16 i;
2561 
2562 	DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2563 
2564 	if (hw->mac.type < e1000_pch2lan)
2565 		return E1000_SUCCESS;
2566 
2567 	/* disable Rx path while enabling/disabling workaround */
2568 	hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2569 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2570 					phy_reg | (1 << 14));
2571 	if (ret_val)
2572 		return ret_val;
2573 
2574 	if (enable) {
2575 		/* Write Rx addresses (rar_entry_count for RAL/H, and
2576 		 * SHRAL/H) and initial CRC values to the MAC
2577 		 */
2578 		for (i = 0; i < hw->mac.rar_entry_count; i++) {
2579 			u8 mac_addr[ETH_ADDR_LEN] = {0};
2580 			u32 addr_high, addr_low;
2581 
2582 			addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2583 			if (!(addr_high & E1000_RAH_AV))
2584 				continue;
2585 			addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2586 			mac_addr[0] = (addr_low & 0xFF);
2587 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
2588 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
2589 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
2590 			mac_addr[4] = (addr_high & 0xFF);
2591 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
2592 
2593 			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2594 					e1000_calc_rx_da_crc(mac_addr));
2595 		}
2596 
2597 		/* Write Rx addresses to the PHY */
2598 		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2599 
2600 		/* Enable jumbo frame workaround in the MAC */
2601 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2602 		mac_reg &= ~(1 << 14);
2603 		mac_reg |= (7 << 15);
2604 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2605 
2606 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2607 		mac_reg |= E1000_RCTL_SECRC;
2608 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2609 
2610 		ret_val = e1000_read_kmrn_reg_generic(hw,
2611 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2612 						&data);
2613 		if (ret_val)
2614 			return ret_val;
2615 		ret_val = e1000_write_kmrn_reg_generic(hw,
2616 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2617 						data | (1 << 0));
2618 		if (ret_val)
2619 			return ret_val;
2620 		ret_val = e1000_read_kmrn_reg_generic(hw,
2621 						E1000_KMRNCTRLSTA_HD_CTRL,
2622 						&data);
2623 		if (ret_val)
2624 			return ret_val;
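		/* Replace the 4-bit field at bits 11:8 with 0xB */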
2625 		data &= ~(0xF << 8);
2626 		data |= (0xB << 8);
2627 		ret_val = e1000_write_kmrn_reg_generic(hw,
2628 						E1000_KMRNCTRLSTA_HD_CTRL,
2629 						data);
2630 		if (ret_val)
2631 			return ret_val;
2632 
2633 		/* Enable jumbo frame workaround in the PHY */
2634 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2635 		data &= ~(0x7F << 5);
2636 		data |= (0x37 << 5);
2637 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2638 		if (ret_val)
2639 			return ret_val;
2640 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2641 		data &= ~(1 << 13);
2642 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2643 		if (ret_val)
2644 			return ret_val;
2645 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2646 		data &= ~(0x3FF << 2);
2647 		data |= (E1000_TX_PTR_GAP << 2);
2648 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2649 		if (ret_val)
2650 			return ret_val;
2651 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2652 		if (ret_val)
2653 			return ret_val;
2654 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2655 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2656 						(1 << 10));
2657 		if (ret_val)
2658 			return ret_val;
2659 	} else {
2660 		/* Write MAC register values back to h/w defaults */
2661 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2662 		mac_reg &= ~(0xF << 14);
2663 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2664 
2665 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2666 		mac_reg &= ~E1000_RCTL_SECRC;
2667 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2668 
2669 		ret_val = e1000_read_kmrn_reg_generic(hw,
2670 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2671 						&data);
2672 		if (ret_val)
2673 			return ret_val;
2674 		ret_val = e1000_write_kmrn_reg_generic(hw,
2675 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2676 						data & ~(1 << 0));
2677 		if (ret_val)
2678 			return ret_val;
2679 		ret_val = e1000_read_kmrn_reg_generic(hw,
2680 						E1000_KMRNCTRLSTA_HD_CTRL,
2681 						&data);
2682 		if (ret_val)
2683 			return ret_val;
2684 		data &= ~(0xF << 8);
2685 		data |= (0xB << 8);
2686 		ret_val = e1000_write_kmrn_reg_generic(hw,
2687 						E1000_KMRNCTRLSTA_HD_CTRL,
2688 						data);
2689 		if (ret_val)
2690 			return ret_val;
2691 
2692 		/* Write PHY register values back to h/w defaults */
2693 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2694 		data &= ~(0x7F << 5);
2695 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2696 		if (ret_val)
2697 			return ret_val;
2698 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2699 		data |= (1 << 13);
2700 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2701 		if (ret_val)
2702 			return ret_val;
2703 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2704 		data &= ~(0x3FF << 2);
2705 		data |= (0x8 << 2);
2706 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2707 		if (ret_val)
2708 			return ret_val;
2709 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2710 		if (ret_val)
2711 			return ret_val;
2712 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2713 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2714 						~(1 << 10));
2715 		if (ret_val)
2716 			return ret_val;
2717 	}
2718 
2719 	/* re-enable Rx path after enabling/disabling workaround */
2720 	return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2721 				     ~(1 << 14));
2722 }
2723 
2724 /**
2725  *  e1000_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
2726  *  done after every PHY reset.
 *  @hw: pointer to the HW structure
2727  **/
2728 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2729 {
2730 	s32 ret_val = E1000_SUCCESS;
2731 
2732 	DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2733 
2734 	if (hw->mac.type != e1000_pch2lan)
2735 		return E1000_SUCCESS;
2736 
2737 	/* Set MDIO slow mode before any other MDIO access */
2738 	ret_val = e1000_set_mdio_slow_mode_hv(hw);
2739 	if (ret_val)
2740 		return ret_val;
2741 
2742 	ret_val = hw->phy.ops.acquire(hw);
2743 	if (ret_val)
2744 		return ret_val;
2745 	/* set MSE higher to enable link to stay up when noise is high */
2746 	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2747 	if (ret_val)
2748 		goto release;
2749 	/* drop the link after the MSE threshold has been reached 5 times */
2750 	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2751 release:
2752 	hw->phy.ops.release(hw);
2753 
2754 	return ret_val;
2755 }
2756 
2757 /**
2758  *  e1000_k1_workaround_lv - K1 Si workaround
2759  *  @hw:   pointer to the HW structure
2760  *
2761  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
2762  *  Disable K1 for 1000 and 100 speeds.
2763  **/
2764 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2765 {
2766 	s32 ret_val = E1000_SUCCESS;
2767 	u16 status_reg = 0;
2768 
2769 	DEBUGFUNC("e1000_k1_workaround_lv");
2770 
2771 	if (hw->mac.type != e1000_pch2lan)
2772 		return E1000_SUCCESS;
2773 
2774 	/* Set K1 beacon duration based on 10Mbps speed */
2775 	ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2776 	if (ret_val)
2777 		return ret_val;
2778 
2779 	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2780 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2781 		if (status_reg &
2782 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2783 			u16 pm_phy_reg;
2784 
2785 			/* LV 1G/100 packet drop issue workaround */
2786 			ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2787 						       &pm_phy_reg);
2788 			if (ret_val)
2789 				return ret_val;
2790 			pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2791 			ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
2792 							pm_phy_reg);
2793 			if (ret_val)
2794 				return ret_val;
2795 		} else {
2796 			u32 mac_reg;
2797 			mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
2798 			mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2799 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2800 			E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2801 		}
2802 	}
2803 
2804 	return ret_val;
2805 }
2806 
2807 /**
2808  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2809  *  @hw:   pointer to the HW structure
2810  *  @gate: boolean set to TRUE to gate, FALSE to ungate
2811  *
2812  *  Gate/ungate the automatic PHY configuration via hardware; perform
2813  *  the configuration via software instead.
2814  **/
2815 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2816 {
2817 	u32 extcnf_ctrl;
2818 
2819 	DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
2820 
2821 	if (hw->mac.type < e1000_pch2lan)
2822 		return;
2823 
2824 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2825 
2826 	if (gate)
2827 		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2828 	else
2829 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2830 
2831 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
2832 }
2833 
2834 /**
2835  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
2836  *  @hw: pointer to the HW structure
2837  *
2838  *  Check the appropriate indication that the MAC has finished configuring the
2839  *  PHY after a software reset.
2840  **/
2841 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2842 {
2843 	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2844 
2845 	DEBUGFUNC("e1000_lan_init_done_ich8lan");
2846 
2847 	/* Wait for basic configuration to complete before proceeding */
2848 	do {
2849 		data = E1000_READ_REG(hw, E1000_STATUS);
2850 		data &= E1000_STATUS_LAN_INIT_DONE;
2851 		usec_delay(100);
2852 	} while ((!data) && --loop);
2853 
2854 	/* If basic configuration is still incomplete when the above loop
2855 	 * count reaches 0, loading the configuration from NVM will
2856 	 * leave the PHY in a bad state possibly resulting in no link.
2857 	 */
2858 	if (loop == 0)
2859 		DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
2860 
2861 	/* Clear the Init Done bit for the next init event */
2862 	data = E1000_READ_REG(hw, E1000_STATUS);
2863 	data &= ~E1000_STATUS_LAN_INIT_DONE;
2864 	E1000_WRITE_REG(hw, E1000_STATUS, data);
2865 }
2866 
2867 /**
2868  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2869  *  @hw: pointer to the HW structure
2870  **/
2871 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2872 {
2873 	s32 ret_val = E1000_SUCCESS;
2874 	u16 reg;
2875 
2876 	DEBUGFUNC("e1000_post_phy_reset_ich8lan");
2877 
2878 	if (hw->phy.ops.check_reset_block(hw))
2879 		return E1000_SUCCESS;
2880 
2881 	/* Allow time for h/w to get to quiescent state after reset */
2882 	msec_delay(10);
2883 
2884 	/* Perform any necessary post-reset workarounds */
2885 	switch (hw->mac.type) {
2886 	case e1000_pchlan:
2887 		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2888 		if (ret_val)
2889 			return ret_val;
2890 		break;
2891 	case e1000_pch2lan:
2892 		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2893 		if (ret_val)
2894 			return ret_val;
2895 		break;
2896 	default:
2897 		break;
2898 	}
2899 
2900 	/* Clear the host wakeup bit after lcd reset */
2901 	if (hw->mac.type >= e1000_pchlan) {
2902 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
2903 		reg &= ~BM_WUC_HOST_WU_BIT;
2904 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
2905 	}
2906 
2907 	/* Configure the LCD with the extended configuration region in NVM */
2908 	ret_val = e1000_sw_lcd_config_ich8lan(hw);
2909 	if (ret_val)
2910 		return ret_val;
2911 
2912 	/* Configure the LCD with the OEM bits in NVM */
2913 	ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
2914 
2915 	if (hw->mac.type == e1000_pch2lan) {
2916 		/* Ungate automatic PHY configuration on non-managed 82579 */
2917 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
2918 		    E1000_ICH_FWSM_FW_VALID)) {
2919 			msec_delay(10);
2920 			e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
2921 		}
2922 
2923 		/* Set EEE LPI Update Timer to 200usec */
2924 		ret_val = hw->phy.ops.acquire(hw);
2925 		if (ret_val)
2926 			return ret_val;
2927 		ret_val = e1000_write_emi_reg_locked(hw,
2928 						     I82579_LPI_UPDATE_TIMER,
2929 						     0x1387);
2930 		hw->phy.ops.release(hw);
2931 	}
2932 
2933 	return ret_val;
2934 }
2935 
2936 /**
2937  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2938  *  @hw: pointer to the HW structure
2939  *
2940  *  Resets the PHY
2941  *  This is a function pointer entry point called by drivers
2942  *  or other shared routines.
2943  **/
2944 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2945 {
2946 	s32 ret_val = E1000_SUCCESS;
2947 
2948 	DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
2949 
2950 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
2951 	if ((hw->mac.type == e1000_pch2lan) &&
2952 	    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
2953 		e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
2954 
2955 	ret_val = e1000_phy_hw_reset_generic(hw);
2956 	if (ret_val)
2957 		return ret_val;
2958 
2959 	return e1000_post_phy_reset_ich8lan(hw);
2960 }
2961 
2962 /**
2963  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2964  *  @hw: pointer to the HW structure
2965  *  @active: TRUE to enable LPLU, FALSE to disable
2966  *
2967  *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
2968  *  bits are disabled in the NVM, writing the LPLU bits in the MAC will not set
2969  *  the phy speed. This function will manually set the LPLU bit and restart
2970  *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
2971  *  since it configures the same bit.
2972  **/
2973 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2974 {
2975 	s32 ret_val;
2976 	u16 oem_reg;
2977 
2978 	DEBUGFUNC("e1000_set_lplu_state_pchlan");
2979 
2980 	ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
2981 	if (ret_val)
2982 		return ret_val;
2983 
2984 	if (active)
2985 		oem_reg |= HV_OEM_BITS_LPLU;
2986 	else
2987 		oem_reg &= ~HV_OEM_BITS_LPLU;
2988 
2989 	if (!hw->phy.ops.check_reset_block(hw))
2990 		oem_reg |= HV_OEM_BITS_RESTART_AN;
2991 
2992 	return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
2993 }
2994 
2995 /**
2996  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2997  *  @hw: pointer to the HW structure
2998  *  @active: TRUE to enable LPLU, FALSE to disable
2999  *
3000  *  Sets the LPLU D0 state according to the active flag.  When
3001  *  activating LPLU this function also disables smart speed
3002  *  and vice versa.  LPLU will not be activated unless the
3003  *  device autonegotiation advertisement meets standards of
3004  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3005  *  This is a function pointer entry point only called by
3006  *  PHY setup routines.
3007  **/
3008 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3009 {
3010 	struct e1000_phy_info *phy = &hw->phy;
3011 	u32 phy_ctrl;
3012 	s32 ret_val = E1000_SUCCESS;
3013 	u16 data;
3014 
3015 	DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3016 
3017 	if (phy->type == e1000_phy_ife)
3018 		return E1000_SUCCESS;
3019 
3020 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3021 
3022 	if (active) {
3023 		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3024 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3025 
3026 		if (phy->type != e1000_phy_igp_3)
3027 			return E1000_SUCCESS;
3028 
3029 		/* Call gig speed drop workaround on LPLU before accessing
3030 		 * any PHY registers
3031 		 */
3032 		if (hw->mac.type == e1000_ich8lan)
3033 			e1000_gig_downshift_workaround_ich8lan(hw);
3034 
3035 		/* When LPLU is enabled, we should disable SmartSpeed */
3036 		ret_val = phy->ops.read_reg(hw,
3037 					    IGP01E1000_PHY_PORT_CONFIG,
3038 					    &data);
3039 		if (ret_val)
3040 			return ret_val;
3041 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3042 		ret_val = phy->ops.write_reg(hw,
3043 					     IGP01E1000_PHY_PORT_CONFIG,
3044 					     data);
3045 		if (ret_val)
3046 			return ret_val;
3047 	} else {
3048 		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3049 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3050 
3051 		if (phy->type != e1000_phy_igp_3)
3052 			return E1000_SUCCESS;
3053 
3054 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3055 		 * during Dx states where the power conservation is most
3056 		 * important.  During driver activity we should enable
3057 		 * SmartSpeed, so performance is maintained.
3058 		 */
3059 		if (phy->smart_speed == e1000_smart_speed_on) {
3060 			ret_val = phy->ops.read_reg(hw,
3061 						    IGP01E1000_PHY_PORT_CONFIG,
3062 						    &data);
3063 			if (ret_val)
3064 				return ret_val;
3065 
3066 			data |= IGP01E1000_PSCFR_SMART_SPEED;
3067 			ret_val = phy->ops.write_reg(hw,
3068 						     IGP01E1000_PHY_PORT_CONFIG,
3069 						     data);
3070 			if (ret_val)
3071 				return ret_val;
3072 		} else if (phy->smart_speed == e1000_smart_speed_off) {
3073 			ret_val = phy->ops.read_reg(hw,
3074 						    IGP01E1000_PHY_PORT_CONFIG,
3075 						    &data);
3076 			if (ret_val)
3077 				return ret_val;
3078 
3079 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3080 			ret_val = phy->ops.write_reg(hw,
3081 						     IGP01E1000_PHY_PORT_CONFIG,
3082 						     data);
3083 			if (ret_val)
3084 				return ret_val;
3085 		}
3086 	}
3087 
3088 	return E1000_SUCCESS;
3089 }
3090 
3091 /**
3092  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3093  *  @hw: pointer to the HW structure
3094  *  @active: TRUE to enable LPLU, FALSE to disable
3095  *
3096  *  Sets the LPLU D3 state according to the active flag.  When
3097  *  activating LPLU this function also disables smart speed
3098  *  and vice versa.  LPLU will not be activated unless the
3099  *  device autonegotiation advertisement meets standards of
3100  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3101  *  This is a function pointer entry point only called by
3102  *  PHY setup routines.
3103  **/
3104 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3105 {
3106 	struct e1000_phy_info *phy = &hw->phy;
3107 	u32 phy_ctrl;
3108 	s32 ret_val = E1000_SUCCESS;
3109 	u16 data;
3110 
3111 	DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3112 
3113 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3114 
3115 	if (!active) {
3116 		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3117 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3118 
3119 		if (phy->type != e1000_phy_igp_3)
3120 			return E1000_SUCCESS;
3121 
3122 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3123 		 * during Dx states where the power conservation is most
3124 		 * important.  During driver activity we should enable
3125 		 * SmartSpeed, so performance is maintained.
3126 		 */
3127 		if (phy->smart_speed == e1000_smart_speed_on) {
3128 			ret_val = phy->ops.read_reg(hw,
3129 						    IGP01E1000_PHY_PORT_CONFIG,
3130 						    &data);
3131 			if (ret_val)
3132 				return ret_val;
3133 
3134 			data |= IGP01E1000_PSCFR_SMART_SPEED;
3135 			ret_val = phy->ops.write_reg(hw,
3136 						     IGP01E1000_PHY_PORT_CONFIG,
3137 						     data);
3138 			if (ret_val)
3139 				return ret_val;
3140 		} else if (phy->smart_speed == e1000_smart_speed_off) {
3141 			ret_val = phy->ops.read_reg(hw,
3142 						    IGP01E1000_PHY_PORT_CONFIG,
3143 						    &data);
3144 			if (ret_val)
3145 				return ret_val;
3146 
3147 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3148 			ret_val = phy->ops.write_reg(hw,
3149 						     IGP01E1000_PHY_PORT_CONFIG,
3150 						     data);
3151 			if (ret_val)
3152 				return ret_val;
3153 		}
3154 	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3155 		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3156 		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3157 		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3158 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3159 
3160 		if (phy->type != e1000_phy_igp_3)
3161 			return E1000_SUCCESS;
3162 
3163 		/* Call gig speed drop workaround on LPLU before accessing
3164 		 * any PHY registers
3165 		 */
3166 		if (hw->mac.type == e1000_ich8lan)
3167 			e1000_gig_downshift_workaround_ich8lan(hw);
3168 
3169 		/* When LPLU is enabled, we should disable SmartSpeed */
3170 		ret_val = phy->ops.read_reg(hw,
3171 					    IGP01E1000_PHY_PORT_CONFIG,
3172 					    &data);
3173 		if (ret_val)
3174 			return ret_val;
3175 
3176 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3177 		ret_val = phy->ops.write_reg(hw,
3178 					     IGP01E1000_PHY_PORT_CONFIG,
3179 					     data);
3180 	}
3181 
3182 	return ret_val;
3183 }
3184 
3185 /**
3186  *  e1000_valid_nvm_bank_detect_ich8lan - finds the valid NVM bank (0 or 1)
3187  *  @hw: pointer to the HW structure
3188  *  @bank:  pointer to the variable that returns the active bank
3189  *
3190  *  Reads signature byte from the NVM using the flash access registers.
3191  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3192  **/
3193 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3194 {
3195 	u32 eecd;
3196 	struct e1000_nvm_info *nvm = &hw->nvm;
3197 	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3198 	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
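	/* act_offset is the byte address of the signature word's high
	 * byte, which holds the valid-signature bits 15:14.
	 */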
3199 	u8 sig_byte = 0;
3200 	s32 ret_val;
3201 
3202 	DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3203 
3204 	switch (hw->mac.type) {
3205 	case e1000_ich8lan:
3206 	case e1000_ich9lan:
3207 		eecd = E1000_READ_REG(hw, E1000_EECD);
3208 		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3209 		    E1000_EECD_SEC1VAL_VALID_MASK) {
3210 			if (eecd & E1000_EECD_SEC1VAL)
3211 				*bank = 1;
3212 			else
3213 				*bank = 0;
3214 
3215 			return E1000_SUCCESS;
3216 		}
3217 		DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3218 		/* fall-thru */
3219 	default:
3220 		/* set bank to 0 in case flash read fails */
3221 		*bank = 0;
3222 
3223 		/* Check bank 0 */
3224 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3225 							&sig_byte);
3226 		if (ret_val)
3227 			return ret_val;
3228 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3229 		    E1000_ICH_NVM_SIG_VALUE) {
3230 			*bank = 0;
3231 			return E1000_SUCCESS;
3232 		}
3233 
3234 		/* Check bank 1 */
3235 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3236 							bank1_offset,
3237 							&sig_byte);
3238 		if (ret_val)
3239 			return ret_val;
3240 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3241 		    E1000_ICH_NVM_SIG_VALUE) {
3242 			*bank = 1;
3243 			return E1000_SUCCESS;
3244 		}
3245 
3246 		DEBUGOUT("ERROR: No valid NVM bank present\n");
3247 		return -E1000_ERR_NVM;
3248 	}
3249 }
3250 
3251 /**
3252  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3253  *  @hw: pointer to the HW structure
3254  *  @offset: The offset (in bytes) of the word(s) to read.
3255  *  @words: Size of data to read in words
3256  *  @data: Pointer to the word(s) to read at offset.
3257  *
3258  *  Reads a word(s) from the NVM using the flash access registers.
3259  **/
3260 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3261 				  u16 *data)
3262 {
3263 	struct e1000_nvm_info *nvm = &hw->nvm;
3264 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3265 	u32 act_offset;
3266 	s32 ret_val = E1000_SUCCESS;
3267 	u32 bank = 0;
3268 	u16 i, word;
3269 
3270 	DEBUGFUNC("e1000_read_nvm_ich8lan");
3271 
3272 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3273 	    (words == 0)) {
3274 		DEBUGOUT("nvm parameter(s) out of bounds\n");
3275 		ret_val = -E1000_ERR_NVM;
3276 		goto out;
3277 	}
3278 
3279 	nvm->ops.acquire(hw);
3280 
3281 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3282 	if (ret_val != E1000_SUCCESS) {
3283 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3284 		bank = 0;
3285 	}
3286 
3287 	act_offset = (bank) ? nvm->flash_bank_size : 0;
3288 	act_offset += offset;
3289 
3290 	ret_val = E1000_SUCCESS;
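	/* Serve words from the shadow RAM when they have been modified
	 * since the last flash commit; otherwise read them from flash.
	 */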
3291 	for (i = 0; i < words; i++) {
3292 		if (dev_spec->shadow_ram[offset+i].modified) {
3293 			data[i] = dev_spec->shadow_ram[offset+i].value;
3294 		} else {
3295 			ret_val = e1000_read_flash_word_ich8lan(hw,
3296 								act_offset + i,
3297 								&word);
3298 			if (ret_val)
3299 				break;
3300 			data[i] = word;
3301 		}
3302 	}
3303 
3304 	nvm->ops.release(hw);
3305 
3306 out:
3307 	if (ret_val)
3308 		DEBUGOUT1("NVM read error: %d\n", ret_val);
3309 
3310 	return ret_val;
3311 }
3312 
3313 /**
3314  *  e1000_flash_cycle_init_ich8lan - Initialize flash
3315  *  @hw: pointer to the HW structure
3316  *
3317  *  This function does initial flash setup so that a new read/write/erase cycle
3318  *  can be started.
3319  **/
3320 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3321 {
3322 	union ich8_hws_flash_status hsfsts;
3323 	s32 ret_val = -E1000_ERR_NVM;
3324 
3325 	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3326 
3327 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3328 
3329 	/* Check if the flash descriptor is valid */
3330 	if (!hsfsts.hsf_status.fldesvalid) {
3331 		DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
3332 		return -E1000_ERR_NVM;
3333 	}
3334 
3335 	/* Clear FCERR and DAEL in hw status by writing 1 */
3336 	hsfsts.hsf_status.flcerr = 1;
3337 	hsfsts.hsf_status.dael = 1;
3338 	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3339 
3340 	/* To start a new cycle we need either a hardware SPI
3341 	 * cycle-in-progress bit to check against, or else the
3342 	 * FDONE bit, which the hardware sets to 1 after a
3343 	 * hardware reset; either one can then be used as an
3344 	 * indication of whether a cycle is in progress or
3345 	 * has completed.
3346 	 */
3347 
3348 	if (!hsfsts.hsf_status.flcinprog) {
3349 		/* There is no cycle running at present,
3350 		 * so we can start a cycle.
3351 		 * Begin by setting Flash Cycle Done.
3352 		 */
3353 		hsfsts.hsf_status.flcdone = 1;
3354 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3355 		ret_val = E1000_SUCCESS;
3356 	} else {
3357 		s32 i;
3358 
3359 		/* Otherwise poll for some time so the current
3360 		 * cycle has a chance to end before giving up.
3361 		 */
3362 		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3363 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3364 							      ICH_FLASH_HSFSTS);
3365 			if (!hsfsts.hsf_status.flcinprog) {
3366 				ret_val = E1000_SUCCESS;
3367 				break;
3368 			}
3369 			usec_delay(1);
3370 		}
3371 		if (ret_val == E1000_SUCCESS) {
3372 			/* The previous cycle completed within the timeout,
3373 			 * so now set the Flash Cycle Done.
3374 			 */
3375 			hsfsts.hsf_status.flcdone = 1;
3376 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3377 						hsfsts.regval);
3378 		} else {
3379 			DEBUGOUT("Flash controller busy, cannot get access\n");
3380 		}
3381 	}
3382 
3383 	return ret_val;
3384 }
3385 
3386 /**
3387  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3388  *  @hw: pointer to the HW structure
3389  *  @timeout: maximum time to wait for completion
3390  *
3391  *  This function starts a flash cycle and waits for its completion.
3392  **/
3393 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3394 {
3395 	union ich8_hws_flash_ctrl hsflctl;
3396 	union ich8_hws_flash_status hsfsts;
3397 	u32 i = 0;
3398 
3399 	DEBUGFUNC("e1000_flash_cycle_ich8lan");
3400 
3401 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3402 	hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3403 	hsflctl.hsf_ctrl.flcgo = 1;
3404 
3405 	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3406 
3407 	/* Wait until the FDONE bit is set to 1 */
3408 	do {
3409 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3410 		if (hsfsts.hsf_status.flcdone)
3411 			break;
3412 		usec_delay(1);
3413 	} while (i++ < timeout);
3414 
3415 	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3416 		return E1000_SUCCESS;
3417 
3418 	return -E1000_ERR_NVM;
3419 }
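
/*
 * Timing sketch (editorial): each poll iteration above delays 1 us, so
 * the timeout argument is roughly a budget in microseconds.  A read
 * cycle, for instance, is bounded like this (mirroring the caller in
 * e1000_read_flash_data_ich8lan below):
 *
 *	ret_val = e1000_flash_cycle_ich8lan(hw,
 *					ICH_FLASH_READ_COMMAND_TIMEOUT);
 *	if (ret_val != E1000_SUCCESS)
 *		DEBUGOUT("flash cycle failed or timed out\n");
 */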
3420 
3421 /**
3422  *  e1000_read_flash_word_ich8lan - Read word from flash
3423  *  @hw: pointer to the HW structure
3424  *  @offset: offset to data location
3425  *  @data: pointer to the location for storing the data
3426  *
3427  *  Reads the flash word at offset into data.  Offset is converted
3428  *  to bytes before read.
3429  **/
3430 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3431 					 u16 *data)
3432 {
3433 	DEBUGFUNC("e1000_read_flash_word_ich8lan");
3434 
3435 	if (!data)
3436 		return -E1000_ERR_NVM;
3437 
3438 	/* Must convert offset into bytes. */
3439 	offset <<= 1;
3440 
3441 	return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3442 }
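
/*
 * Worked example (editorial, illustrative): reading the bank signature
 * word.  Word offset E1000_ICH_NVM_SIG_WORD (0x13) is shifted to byte
 * offset 0x26, and the bytes at 0x26/0x27 come back as one word whose
 * bits 15:14 carry the bank-valid signature.  (NVM acquire/release is
 * omitted here for brevity.)
 *
 *	u16 sig;
 *
 *	if (e1000_read_flash_word_ich8lan(hw, E1000_ICH_NVM_SIG_WORD,
 *					  &sig) == E1000_SUCCESS)
 *		DEBUGOUT1("signature bits 15:14 = %x\n", (sig >> 14) & 3);
 */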
3443 
3444 /**
3445  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3446  *  @hw: pointer to the HW structure
3447  *  @offset: The offset of the byte to read.
3448  *  @data: Pointer to a byte to store the value read.
3449  *
3450  *  Reads a single byte from the NVM using the flash access registers.
3451  **/
3452 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3453 					 u8 *data)
3454 {
3455 	s32 ret_val;
3456 	u16 word = 0;
3457 
3458 	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3459 
3460 	if (ret_val)
3461 		return ret_val;
3462 
3463 	*data = (u8)word;
3464 
3465 	return E1000_SUCCESS;
3466 }
3467 
3468 /**
3469  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3470  *  @hw: pointer to the HW structure
3471  *  @offset: The offset (in bytes) of the byte or word to read.
3472  *  @size: Size of data to read, 1=byte 2=word
3473  *  @data: Pointer to the word to store the value read.
3474  *
3475  *  Reads a byte or word from the NVM using the flash access registers.
3476  **/
3477 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3478 					 u8 size, u16 *data)
3479 {
3480 	union ich8_hws_flash_status hsfsts;
3481 	union ich8_hws_flash_ctrl hsflctl;
3482 	u32 flash_linear_addr;
3483 	u32 flash_data = 0;
3484 	s32 ret_val = -E1000_ERR_NVM;
3485 	u8 count = 0;
3486 
3487 	DEBUGFUNC("e1000_read_flash_data_ich8lan");
3488 
3489 	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3490 		return -E1000_ERR_NVM;
3491 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3492 			     hw->nvm.flash_base_addr);
3493 
3494 	do {
3495 		usec_delay(1);
3496 		/* Steps */
3497 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3498 		if (ret_val != E1000_SUCCESS)
3499 			break;
3500 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3501 
3502 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3503 		hsflctl.hsf_ctrl.fldbcount = size - 1;
3504 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3505 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3506 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3507 
3508 		ret_val = e1000_flash_cycle_ich8lan(hw,
3509 						ICH_FLASH_READ_COMMAND_TIMEOUT);
3510 
3511 		/* Check if FCERR is set to 1; if so, clear it and
3512 		 * retry the whole sequence a few more times.  Otherwise
3513 		 * read in (shift in) Flash Data0, least significant
3514 		 * byte first.
3515 		 */
3516 		if (ret_val == E1000_SUCCESS) {
3517 			flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3518 			if (size == 1)
3519 				*data = (u8)(flash_data & 0x000000FF);
3520 			else if (size == 2)
3521 				*data = (u16)(flash_data & 0x0000FFFF);
3522 			break;
3523 		} else {
3524 			/* If we've gotten here, then things are probably
3525 			 * completely hosed, but if the error condition is
3526 			 * detected, it won't hurt to give it another try...
3527 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3528 			 */
3529 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3530 							      ICH_FLASH_HSFSTS);
3531 			if (hsfsts.hsf_status.flcerr) {
3532 				/* Repeat for some time before giving up. */
3533 				continue;
3534 			} else if (!hsfsts.hsf_status.flcdone) {
3535 				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3536 				break;
3537 			}
3538 		}
3539 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3540 
3541 	return ret_val;
3542 }
3543 
3544 
3545 /**
3546  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3547  *  @hw: pointer to the HW structure
3548  *  @offset: The offset (in words) of the word(s) to write.
3549  *  @words: Size of data to write in words
3550  *  @data: Pointer to the word(s) to write at offset.
3551  *
3552  *  Writes the word(s) to the shadow RAM, to be committed to the NVM later.
3553  **/
3554 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3555 				   u16 *data)
3556 {
3557 	struct e1000_nvm_info *nvm = &hw->nvm;
3558 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3559 	u16 i;
3560 
3561 	DEBUGFUNC("e1000_write_nvm_ich8lan");
3562 
3563 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3564 	    (words == 0)) {
3565 		DEBUGOUT("nvm parameter(s) out of bounds\n");
3566 		return -E1000_ERR_NVM;
3567 	}
3568 
3569 	nvm->ops.acquire(hw);
3570 
3571 	for (i = 0; i < words; i++) {
3572 		dev_spec->shadow_ram[offset+i].modified = TRUE;
3573 		dev_spec->shadow_ram[offset+i].value = data[i];
3574 	}
3575 
3576 	nvm->ops.release(hw);
3577 
3578 	return E1000_SUCCESS;
3579 }
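
/*
 * Usage sketch (editorial example): because this routine only caches the
 * words in the shadow RAM, a write does not reach the flash until the
 * checksum update commits it.  The offset and value below are
 * hypothetical.
 *
 *	u16 word = 0x1234;
 *
 *	if (hw->nvm.ops.write(hw, 0x20, 1, &word) == E1000_SUCCESS)
 *		hw->nvm.ops.update(hw);
 */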
3580 
3581 /**
3582  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
3583  *  @hw: pointer to the HW structure
3584  *
3585  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
3586  *  which writes the checksum to the shadow ram.  The changes in the shadow
3587  *  ram are then committed to the EEPROM by processing each bank at a time
3588  *  checking for the modified bit and writing only the pending changes.
3589  *  After a successful commit, the shadow ram is cleared and is ready for
3590  *  future writes.
3591  **/
3592 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
3593 {
3594 	struct e1000_nvm_info *nvm = &hw->nvm;
3595 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3596 	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3597 	s32 ret_val;
3598 	u16 data = 0;
3599 
3600 	DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
3601 
3602 	ret_val = e1000_update_nvm_checksum_generic(hw);
3603 	if (ret_val)
3604 		goto out;
3605 
3606 	if (nvm->type != e1000_nvm_flash_sw)
3607 		goto out;
3608 
3609 	nvm->ops.acquire(hw);
3610 
3611 	/* We're writing to the opposite bank so if we're on bank 1,
3612 	 * write to bank 0 etc.  We also need to erase the segment that
3613 	 * is going to be written
3614 	 */
3615 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3616 	if (ret_val != E1000_SUCCESS) {
3617 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3618 		bank = 0;
3619 	}
3620 
3621 	if (bank == 0) {
3622 		new_bank_offset = nvm->flash_bank_size;
3623 		old_bank_offset = 0;
3624 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3625 		if (ret_val)
3626 			goto release;
3627 	} else {
3628 		old_bank_offset = nvm->flash_bank_size;
3629 		new_bank_offset = 0;
3630 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3631 		if (ret_val)
3632 			goto release;
3633 	}
3634 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3635 		if (dev_spec->shadow_ram[i].modified) {
3636 			data = dev_spec->shadow_ram[i].value;
3637 		} else {
3638 			ret_val = e1000_read_flash_word_ich8lan(hw, i +
3639 								old_bank_offset,
3640 								&data);
3641 			if (ret_val)
3642 				break;
3643 		}
3644 		/* If the word is at offset 0x13, make sure the signature bits
3645 		 * (15:14) are 11b until the commit has completed.
3646 		 * This will allow us to write 10b which indicates the
3647 		 * signature is valid.  We want to do this after the write
3648 		 * has completed so that we don't mark the segment valid
3649 		 * while the write is still in progress
3650 		 */
3651 		if (i == E1000_ICH_NVM_SIG_WORD)
3652 			data |= E1000_ICH_NVM_SIG_MASK;
3653 
3654 		/* Convert offset to bytes. */
3655 		act_offset = (i + new_bank_offset) << 1;
3656 
3657 		usec_delay(100);
3658 
3659 		/* Write the bytes to the new bank. */
3660 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3661 							       act_offset,
3662 							       (u8)data);
3663 		if (ret_val)
3664 			break;
3665 
3666 		usec_delay(100);
3667 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3668 							  act_offset + 1,
3669 							  (u8)(data >> 8));
3670 		if (ret_val)
3671 			break;
3672 	}
3673 
3674 	/* Don't bother writing the segment valid bits if sector
3675 	 * programming failed.
3676 	 */
3677 	if (ret_val) {
3678 		DEBUGOUT("Flash commit failed.\n");
3679 		goto release;
3680 	}
3681 
3682 	/* Finally validate the new segment by setting bits 15:14
3683 	 * to 10b in word 0x13.  This can be done without an
3684 	 * erase because these bits start out as 11b and we only
3685 	 * need to clear bit 14 to 0b.
3686 	 */
3687 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
3688 	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
3689 	if (ret_val)
3690 		goto release;
3691 
3692 	data &= 0xBFFF;
3693 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1,
3694 						       (u8)(data >> 8));
3695 	if (ret_val)
3696 		goto release;
3697 
3698 	/* And invalidate the previously valid segment by setting
3699 	 * the high byte of its signature word (0x13) to 0.  This can
3700 	 * be done without an erase because a flash erase sets all
3701 	 * bits to 1's, and 1's can be written to 0's without an erase.
3702 	 */
3703 	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
3704 
3705 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
3706 
3707 	if (ret_val)
3708 		goto release;
3709 
3710 	/* Great!  Everything worked, we can now clear the cached entries. */
3711 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3712 		dev_spec->shadow_ram[i].modified = FALSE;
3713 		dev_spec->shadow_ram[i].value = 0xFFFF;
3714 	}
3715 
3716 release:
3717 	nvm->ops.release(hw);
3718 
3719 	/* Reload the EEPROM, or else modifications will not appear
3720 	 * until after the next adapter reset.
3721 	 */
3722 	if (!ret_val) {
3723 		nvm->ops.reload(hw);
3724 		msec_delay(10);
3725 	}
3726 
3727 out:
3728 	if (ret_val)
3729 		DEBUGOUT1("NVM update error: %d\n", ret_val);
3730 
3731 	return ret_val;
3732 }
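
/*
 * Bank signature walk-through (editorial, illustrative values): the
 * commit above leans on flash semantics where an erase sets bits to 1
 * and writes can only clear bits, so the signature in word 0x13 moves
 * through one-way transitions:
 *
 *	data = 0xFFFF;			erased bank, bits 15:14 = 11b
 *	data |= E1000_ICH_NVM_SIG_MASK;	held at 11b while writing
 *	data &= 0xBFFF;			bit 14 cleared, 15:14 = 10b, valid
 *
 * and invalidating the old bank writes its signature high byte to 0x00,
 * leaving 15:14 = 00b.
 */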
3733 
3734 /**
3735  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
3736  *  @hw: pointer to the HW structure
3737  *
3738  *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
3739  *  If the bit is 0, the EEPROM has been modified but the checksum was not
3740  *  calculated; in that case, calculate the checksum and set bit 6.
3741  **/
3742 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
3743 {
3744 	s32 ret_val;
3745 	u16 data;
3746 	u16 word;
3747 	u16 valid_csum_mask;
3748 
3749 	DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
3750 
3751 	/* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
3752 	 * the checksum needs to be fixed.  This bit is an indication that
3753 	 * the NVM was prepared by OEM software and did not calculate
3754 	 * the checksum...a likely scenario.
3755 	 */
3756 	switch (hw->mac.type) {
3757 	case e1000_pch_lpt:
3758 		word = NVM_COMPAT;
3759 		valid_csum_mask = NVM_COMPAT_VALID_CSUM;
3760 		break;
3761 	default:
3762 		word = NVM_FUTURE_INIT_WORD1;
3763 		valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
3764 		break;
3765 	}
3766 
3767 	ret_val = hw->nvm.ops.read(hw, word, 1, &data);
3768 	if (ret_val)
3769 		return ret_val;
3770 
3771 	if (!(data & valid_csum_mask)) {
3772 		data |= valid_csum_mask;
3773 		ret_val = hw->nvm.ops.write(hw, word, 1, &data);
3774 		if (ret_val)
3775 			return ret_val;
3776 		ret_val = hw->nvm.ops.update(hw);
3777 		if (ret_val)
3778 			return ret_val;
3779 	}
3780 
3781 	return e1000_validate_nvm_checksum_generic(hw);
3782 }
3783 
3784 /**
3785  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
3786  *  @hw: pointer to the HW structure
3787  *  @offset: The offset (in bytes) of the byte/word to write.
3788  *  @size: Size of data to write, 1=byte 2=word
3789  *  @data: The byte(s) to write to the NVM.
3790  *
3791  *  Writes one/two bytes to the NVM using the flash access registers.
3792  **/
3793 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3794 					  u8 size, u16 data)
3795 {
3796 	union ich8_hws_flash_status hsfsts;
3797 	union ich8_hws_flash_ctrl hsflctl;
3798 	u32 flash_linear_addr;
3799 	u32 flash_data = 0;
3800 	s32 ret_val;
3801 	u8 count = 0;
3802 
3803 	DEBUGFUNC("e1000_write_flash_data_ich8lan");
3804 
3805 	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3806 		return -E1000_ERR_NVM;
3807 
3808 	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3809 			     hw->nvm.flash_base_addr);
3810 
3811 	do {
3812 		usec_delay(1);
3813 		/* Steps */
3814 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3815 		if (ret_val != E1000_SUCCESS)
3816 			break;
3817 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3818 
3819 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3820 		hsflctl.hsf_ctrl.fldbcount = size - 1;
3821 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
3822 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3823 
3824 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3825 
3826 		if (size == 1)
3827 			flash_data = (u32)data & 0x00FF;
3828 		else
3829 			flash_data = (u32)data;
3830 
3831 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
3832 
3833 		/* Check if FCERR is set to 1; if so, clear it and try
3834 		 * the whole sequence a few more times, else we are done.
3835 		 */
3836 		ret_val =
3837 		    e1000_flash_cycle_ich8lan(hw,
3838 					      ICH_FLASH_WRITE_COMMAND_TIMEOUT);
3839 		if (ret_val == E1000_SUCCESS)
3840 			break;
3841 
3842 		/* If we're here, then things are most likely
3843 		 * completely hosed, but if the error condition
3844 		 * is detected, it won't hurt to give it another
3845 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
3846 		 */
3847 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3848 		if (hsfsts.hsf_status.flcerr)
3849 			/* Repeat for some time before giving up. */
3850 			continue;
3851 		if (!hsfsts.hsf_status.flcdone) {
3852 			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3853 			break;
3854 		}
3855 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3856 
3857 	return ret_val;
3858 }
3859 
3860 
3861 /**
3862  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
3863  *  @hw: pointer to the HW structure
3864  *  @offset: The index of the byte to write.
3865  *  @data: The byte to write to the NVM.
3866  *
3867  *  Writes a single byte to the NVM using the flash access registers.
3868  **/
3869 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3870 					  u8 data)
3871 {
3872 	u16 word = (u16)data;
3873 
3874 	DEBUGFUNC("e1000_write_flash_byte_ich8lan");
3875 
3876 	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
3877 }
3878 
3879 
3880 
3881 /**
3882  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
3883  *  @hw: pointer to the HW structure
3884  *  @offset: The offset of the byte to write.
3885  *  @byte: The byte to write to the NVM.
3886  *
3887  *  Writes a single byte to the NVM using the flash access registers.
3888  *  Goes through a retry algorithm before giving up.
3889  **/
3890 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
3891 						u32 offset, u8 byte)
3892 {
3893 	s32 ret_val;
3894 	u16 program_retries;
3895 
3896 	DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
3897 
3898 	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3899 	if (!ret_val)
3900 		return ret_val;
3901 
3902 	for (program_retries = 0; program_retries < 100; program_retries++) {
3903 		DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
3904 		usec_delay(100);
3905 		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3906 		if (ret_val == E1000_SUCCESS)
3907 			break;
3908 	}
3909 	if (program_retries == 100)
3910 		return -E1000_ERR_NVM;
3911 
3912 	return E1000_SUCCESS;
3913 }
3914 
3915 /**
3916  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3917  *  @hw: pointer to the HW structure
3918  *  @bank: 0 for first bank, 1 for second bank, etc.
3919  *
3920  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
3921  *  Bank N starts at byte offset 4096 * N from flash_base_addr.
3922  **/
3923 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3924 {
3925 	struct e1000_nvm_info *nvm = &hw->nvm;
3926 	union ich8_hws_flash_status hsfsts;
3927 	union ich8_hws_flash_ctrl hsflctl;
3928 	u32 flash_linear_addr;
3929 	/* bank size is in 16-bit words - adjust to bytes */
3930 	u32 flash_bank_size = nvm->flash_bank_size * 2;
3931 	s32 ret_val;
3932 	s32 count = 0;
3933 	s32 j, iteration, sector_size;
3934 
3935 	DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
3936 
3937 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3938 
3939 	/* Determine HW Sector size: Read BERASE bits of hw flash status
3940 	 * register
3941 	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
3942 	 *     consecutive sectors.  The start index for the nth Hw sector
3943 	 *     can be calculated as = bank * 4096 + n * 256
3944 	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3945 	 *     The start index for the nth Hw sector can be calculated
3946 	 *     as = bank * 4096
3947 	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3948 	 *     (ich9 only, otherwise error condition)
3949 	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3950 	 */
3951 	switch (hsfsts.hsf_status.berasesz) {
3952 	case 0:
3953 		/* Hw sector size 256 */
3954 		sector_size = ICH_FLASH_SEG_SIZE_256;
3955 		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
3956 		break;
3957 	case 1:
3958 		sector_size = ICH_FLASH_SEG_SIZE_4K;
3959 		iteration = 1;
3960 		break;
3961 	case 2:
3962 		sector_size = ICH_FLASH_SEG_SIZE_8K;
3963 		iteration = 1;
3964 		break;
3965 	case 3:
3966 		sector_size = ICH_FLASH_SEG_SIZE_64K;
3967 		iteration = 1;
3968 		break;
3969 	default:
3970 		return -E1000_ERR_NVM;
3971 	}
3972 
3973 	/* Start with the base address, then add the sector offset. */
3974 	flash_linear_addr = hw->nvm.flash_base_addr;
3975 	flash_linear_addr += (bank) ? flash_bank_size : 0;
3976 
3977 	for (j = 0; j < iteration; j++) {
3978 		do {
3979 			u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
3980 
3981 			/* Steps */
3982 			ret_val = e1000_flash_cycle_init_ich8lan(hw);
3983 			if (ret_val)
3984 				return ret_val;
3985 
3986 			/* Write a value 11 (block Erase) in Flash
3987 			 * Cycle field in hw flash control
3988 			 */
3989 			hsflctl.regval =
3990 			    E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3991 
3992 			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3993 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3994 						hsflctl.regval);
3995 
3996 			/* Write the last 24 bits of an index within the
3997 			 * block into Flash Linear address field in Flash
3998 			 * Address.
3999 			 */
4000 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
4001 					      flash_linear_addr +
4002 					      (j * sector_size));
4003 
4004 			ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4005 			if (ret_val == E1000_SUCCESS)
4006 				break;
4007 
4008 			/* Check if FCERR is set to 1.  If 1,
4009 			 * clear it and try the whole sequence
4010 			 * a few more times else Done
4011 			 */
4012 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4013 						      ICH_FLASH_HSFSTS);
4014 			if (hsfsts.hsf_status.flcerr)
4015 				/* repeat for some time before giving up */
4016 				continue;
4017 			else if (!hsfsts.hsf_status.flcdone)
4018 				return ret_val;
4019 		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4020 	}
4021 
4022 	return E1000_SUCCESS;
4023 }
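
/*
 * Erase arithmetic (editorial example, assuming a 4 KB bank, i.e.
 * flash_bank_size = 2048 words): with BERASE = 00b the loop above issues
 * 4096 / 256 = 16 erase cycles at byte offsets 0x000, 0x100, ... 0xF00
 * from the bank base; with BERASE = 01b it issues a single 4 KB erase at
 * offset 0.
 *
 *	u32 bank_bytes = nvm->flash_bank_size * 2;		4096
 *	s32 iters = bank_bytes / ICH_FLASH_SEG_SIZE_256;	16
 */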
4024 
4025 /**
4026  *  e1000_valid_led_default_ich8lan - Set the default LED settings
4027  *  @hw: pointer to the HW structure
4028  *  @data: Pointer to the LED settings
4029  *
4030  *  Reads the LED default settings from the NVM to data.  If the NVM LED
4031  *  settings are all 0's or F's, set the LED default to a valid LED default
4032  *  setting.
4033  **/
4034 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4035 {
4036 	s32 ret_val;
4037 
4038 	DEBUGFUNC("e1000_valid_led_default_ich8lan");
4039 
4040 	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4041 	if (ret_val) {
4042 		DEBUGOUT("NVM Read Error\n");
4043 		return ret_val;
4044 	}
4045 
4046 	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4047 		*data = ID_LED_DEFAULT_ICH8LAN;
4048 
4049 	return E1000_SUCCESS;
4050 }
4051 
4052 /**
4053  *  e1000_id_led_init_pchlan - store LED configurations
4054  *  @hw: pointer to the HW structure
4055  *
4056  *  PCH does not control LEDs via the LEDCTL register; rather, it uses
4057  *  the PHY LED configuration register.
4058  *
4059  *  PCH also does not have an "always on" or "always off" mode which
4060  *  complicates the ID feature.  Instead of using the "on" mode to indicate
4061  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4062  *  use "link_up" mode.  The LEDs will still ID on request if there is no
4063  *  link based on logic in e1000_led_[on|off]_pchlan().
4064  **/
4065 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4066 {
4067 	struct e1000_mac_info *mac = &hw->mac;
4068 	s32 ret_val;
4069 	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4070 	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4071 	u16 data, i, temp, shift;
4072 
4073 	DEBUGFUNC("e1000_id_led_init_pchlan");
4074 
4075 	/* Get default ID LED modes */
4076 	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4077 	if (ret_val)
4078 		return ret_val;
4079 
4080 	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4081 	mac->ledctl_mode1 = mac->ledctl_default;
4082 	mac->ledctl_mode2 = mac->ledctl_default;
4083 
4084 	for (i = 0; i < 4; i++) {
4085 		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4086 		shift = (i * 5);
4087 		switch (temp) {
4088 		case ID_LED_ON1_DEF2:
4089 		case ID_LED_ON1_ON2:
4090 		case ID_LED_ON1_OFF2:
4091 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4092 			mac->ledctl_mode1 |= (ledctl_on << shift);
4093 			break;
4094 		case ID_LED_OFF1_DEF2:
4095 		case ID_LED_OFF1_ON2:
4096 		case ID_LED_OFF1_OFF2:
4097 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4098 			mac->ledctl_mode1 |= (ledctl_off << shift);
4099 			break;
4100 		default:
4101 			/* Do nothing */
4102 			break;
4103 		}
4104 		switch (temp) {
4105 		case ID_LED_DEF1_ON2:
4106 		case ID_LED_ON1_ON2:
4107 		case ID_LED_OFF1_ON2:
4108 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4109 			mac->ledctl_mode2 |= (ledctl_on << shift);
4110 			break;
4111 		case ID_LED_DEF1_OFF2:
4112 		case ID_LED_ON1_OFF2:
4113 		case ID_LED_OFF1_OFF2:
4114 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4115 			mac->ledctl_mode2 |= (ledctl_off << shift);
4116 			break;
4117 		default:
4118 			/* Do nothing */
4119 			break;
4120 		}
4121 	}
4122 
4123 	return E1000_SUCCESS;
4124 }
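
/*
 * Field-packing sketch (editorial, hypothetical value): the NVM word
 * holds four 4-bit LED modes, while the PHY LED register uses 5-bit
 * fields, hence the (i << 2) read shift versus the (i * 5) write shift
 * above.  For data = 0x0421, LED1's mode works out as:
 *
 *	temp = (0x0421 >> (1 << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
 *	shift = 1 * 5;
 *
 * giving temp = 0x2, placed in bits 9:5 of ledctl_mode1/ledctl_mode2.
 */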
4125 
4126 /**
4127  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4128  *  @hw: pointer to the HW structure
4129  *
4130  *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
4131  *  register, so the bus width is hard coded.
4132  **/
4133 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4134 {
4135 	struct e1000_bus_info *bus = &hw->bus;
4136 	s32 ret_val;
4137 
4138 	DEBUGFUNC("e1000_get_bus_info_ich8lan");
4139 
4140 	ret_val = e1000_get_bus_info_pcie_generic(hw);
4141 
4142 	/* ICH devices are "PCI Express"-ish.  They have
4143 	 * a configuration space, but do not contain
4144 	 * PCI Express Capability registers, so bus width
4145 	 * must be hardcoded.
4146 	 */
4147 	if (bus->width == e1000_bus_width_unknown)
4148 		bus->width = e1000_bus_width_pcie_x1;
4149 
4150 	return ret_val;
4151 }
4152 
4153 /**
4154  *  e1000_reset_hw_ich8lan - Reset the hardware
4155  *  @hw: pointer to the HW structure
4156  *
4157  *  Does a full reset of the hardware which includes a reset of the PHY and
4158  *  MAC.
4159  **/
4160 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4161 {
4162 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4163 	u16 kum_cfg;
4164 	u32 ctrl, reg;
4165 	s32 ret_val;
4166 
4167 	DEBUGFUNC("e1000_reset_hw_ich8lan");
4168 
4169 	/* Prevent the PCI-E bus from sticking if there is no TLP connection
4170 	 * on the last TLP read/write transaction when MAC is reset.
4171 	 */
4172 	ret_val = e1000_disable_pcie_master_generic(hw);
4173 	if (ret_val)
4174 		DEBUGOUT("PCI-E Master disable polling has failed.\n");
4175 
4176 	DEBUGOUT("Masking off all interrupts\n");
4177 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4178 
4179 	/* Disable the Transmit and Receive units.  Then delay to allow
4180 	 * any pending transactions to complete before we hit the MAC
4181 	 * with the global reset.
4182 	 */
4183 	E1000_WRITE_REG(hw, E1000_RCTL, 0);
4184 	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
4185 	E1000_WRITE_FLUSH(hw);
4186 
4187 	msec_delay(10);
4188 
4189 	/* Workaround for ICH8 bit corruption issue in FIFO memory */
4190 	if (hw->mac.type == e1000_ich8lan) {
4191 		/* Set Tx and Rx buffer allocation to 8k apiece. */
4192 		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
4193 		/* Set Packet Buffer Size to 16k. */
4194 		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
4195 	}
4196 
4197 	if (hw->mac.type == e1000_pchlan) {
4198 		/* Save the NVM K1 bit setting */
4199 		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4200 		if (ret_val)
4201 			return ret_val;
4202 
4203 		if (kum_cfg & E1000_NVM_K1_ENABLE)
4204 			dev_spec->nvm_k1_enabled = TRUE;
4205 		else
4206 			dev_spec->nvm_k1_enabled = FALSE;
4207 	}
4208 
4209 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
4210 
4211 	if (!hw->phy.ops.check_reset_block(hw)) {
4212 		/* Full-chip reset requires MAC and PHY reset at the same
4213 		 * time to make sure the interface between MAC and the
4214 		 * external PHY is reset.
4215 		 */
4216 		ctrl |= E1000_CTRL_PHY_RST;
4217 
4218 		/* Gate automatic PHY configuration by hardware on
4219 		 * non-managed 82579
4220 		 */
4221 		if ((hw->mac.type == e1000_pch2lan) &&
4222 		    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
4223 			e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
4224 	}
4225 	ret_val = e1000_acquire_swflag_ich8lan(hw);
4226 	DEBUGOUT("Issuing a global reset to ich8lan\n");
4227 	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
4228 	/* cannot issue a flush here because it hangs the hardware */
4229 	msec_delay(20);
4230 
4231 	/* Set Phy Config Counter to 50msec */
4232 	if (hw->mac.type == e1000_pch2lan) {
4233 		reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
4234 		reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
4235 		reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
4236 		E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
4237 	}
4238 
4239 	if (ctrl & E1000_CTRL_PHY_RST) {
4240 		ret_val = hw->phy.ops.get_cfg_done(hw);
4241 		if (ret_val)
4242 			return ret_val;
4243 
4244 		ret_val = e1000_post_phy_reset_ich8lan(hw);
4245 		if (ret_val)
4246 			return ret_val;
4247 	}
4248 
4249 	/* For PCH, this write will make sure that any noise
4250 	 * will be detected as a CRC error and be dropped rather than show up
4251 	 * as a bad packet to the DMA engine.
4252 	 */
4253 	if (hw->mac.type == e1000_pchlan)
4254 		E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
4255 
4256 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4257 	E1000_READ_REG(hw, E1000_ICR);
4258 
4259 	reg = E1000_READ_REG(hw, E1000_KABGTXD);
4260 	reg |= E1000_KABGTXD_BGSQLBIAS;
4261 	E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
4262 
4263 	return E1000_SUCCESS;
4264 }
4265 
4266 /**
4267  *  e1000_init_hw_ich8lan - Initialize the hardware
4268  *  @hw: pointer to the HW structure
4269  *
4270  *  Prepares the hardware for transmit and receive by doing the following:
4271  *   - initialize hardware bits
4272  *   - initialize LED identification
4273  *   - setup receive address registers
4274  *   - setup flow control
4275  *   - setup transmit descriptors
4276  *   - clear statistics
4277  **/
4278 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
4279 {
4280 	struct e1000_mac_info *mac = &hw->mac;
4281 	u32 ctrl_ext, txdctl, snoop;
4282 	s32 ret_val;
4283 	u16 i;
4284 
4285 	DEBUGFUNC("e1000_init_hw_ich8lan");
4286 
4287 	e1000_initialize_hw_bits_ich8lan(hw);
4288 
4289 	/* Initialize identification LED */
4290 	ret_val = mac->ops.id_led_init(hw);
4291 	/* An error is not fatal and we should not stop init due to this */
4292 	if (ret_val)
4293 		DEBUGOUT("Error initializing identification LED\n");
4294 
4295 	/* Setup the receive address. */
4296 	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
4297 
4298 	/* Zero out the Multicast HASH table */
4299 	DEBUGOUT("Zeroing the MTA\n");
4300 	for (i = 0; i < mac->mta_reg_count; i++)
4301 		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
4302 
4303 	/* The 82578 Rx buffer will stall if wakeup is enabled in host and
4304 	 * the ME.  Disable wakeup by clearing the host wakeup bit.
4305 	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
4306 	 */
4307 	if (hw->phy.type == e1000_phy_82578) {
4308 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
4309 		i &= ~BM_WUC_HOST_WU_BIT;
4310 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
4311 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
4312 		if (ret_val)
4313 			return ret_val;
4314 	}
4315 
4316 	/* Setup link and flow control */
4317 	ret_val = mac->ops.setup_link(hw);
4318 
4319 	/* Set the transmit descriptor write-back policy for both queues */
4320 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
4321 	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4322 		  E1000_TXDCTL_FULL_TX_DESC_WB);
4323 	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4324 		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4325 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
4326 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
4327 	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4328 		  E1000_TXDCTL_FULL_TX_DESC_WB);
4329 	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4330 		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4331 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
4332 
4333 	/* ICH8 has opposite polarity of no_snoop bits.
4334 	 * By default, we should use snoop behavior.
4335 	 */
4336 	if (mac->type == e1000_ich8lan)
4337 		snoop = PCIE_ICH8_SNOOP_ALL;
4338 	else
4339 		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
4340 	e1000_set_pcie_no_snoop_generic(hw, snoop);
4341 
4342 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
4343 	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
4344 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
4345 
4346 	/* Clear all of the statistics registers (clear on read).  It is
4347 	 * important that we do this after we have tried to establish link
4348 	 * because the symbol error count will increment wildly if there
4349 	 * is no link.
4350 	 */
4351 	e1000_clear_hw_cntrs_ich8lan(hw);
4352 
4353 	return ret_val;
4354 }
4355 
4356 /**
4357  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
4358  *  @hw: pointer to the HW structure
4359  *
4360  *  Sets/Clears required hardware bits necessary for correctly setting up the
4361  *  hardware for transmit and receive.
4362  **/
4363 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
4364 {
4365 	u32 reg;
4366 
4367 	DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
4368 
4369 	/* Extended Device Control */
4370 	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
4371 	reg |= (1 << 22);
4372 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
4373 	if (hw->mac.type >= e1000_pchlan)
4374 		reg |= E1000_CTRL_EXT_PHYPDEN;
4375 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
4376 
4377 	/* Transmit Descriptor Control 0 */
4378 	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
4379 	reg |= (1 << 22);
4380 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
4381 
4382 	/* Transmit Descriptor Control 1 */
4383 	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
4384 	reg |= (1 << 22);
4385 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
4386 
4387 	/* Transmit Arbitration Control 0 */
4388 	reg = E1000_READ_REG(hw, E1000_TARC(0));
4389 	if (hw->mac.type == e1000_ich8lan)
4390 		reg |= (1 << 28) | (1 << 29);
4391 	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
4392 	E1000_WRITE_REG(hw, E1000_TARC(0), reg);
4393 
4394 	/* Transmit Arbitration Control 1 */
4395 	reg = E1000_READ_REG(hw, E1000_TARC(1));
4396 	if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
4397 		reg &= ~(1 << 28);
4398 	else
4399 		reg |= (1 << 28);
4400 	reg |= (1 << 24) | (1 << 26) | (1 << 30);
4401 	E1000_WRITE_REG(hw, E1000_TARC(1), reg);
4402 
4403 	/* Device Status */
4404 	if (hw->mac.type == e1000_ich8lan) {
4405 		reg = E1000_READ_REG(hw, E1000_STATUS);
4406 		reg &= ~(1 << 31);
4407 		E1000_WRITE_REG(hw, E1000_STATUS, reg);
4408 	}
4409 
4410 	/* Work around a descriptor data corruption issue during NFSv2 UDP
4411 	 * traffic by disabling the NFS filtering capability.
4412 	 */
4413 	reg = E1000_READ_REG(hw, E1000_RFCTL);
4414 	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
4415 
4416 	/* Disable IPv6 extension header parsing because some malformed
4417 	 * IPv6 headers can hang the Rx.
4418 	 */
4419 	if (hw->mac.type == e1000_ich8lan)
4420 		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
4421 	E1000_WRITE_REG(hw, E1000_RFCTL, reg);
4422 
4423 	/* Enable ECC on Lynxpoint */
4424 	if (hw->mac.type == e1000_pch_lpt) {
4425 		reg = E1000_READ_REG(hw, E1000_PBECCSTS);
4426 		reg |= E1000_PBECCSTS_ECC_ENABLE;
4427 		E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
4428 
4429 		reg = E1000_READ_REG(hw, E1000_CTRL);
4430 		reg |= E1000_CTRL_MEHE;
4431 		E1000_WRITE_REG(hw, E1000_CTRL, reg);
4432 	}
4433 
4434 	return;
4435 }
4436 
4437 /**
4438  *  e1000_setup_link_ich8lan - Setup flow control and link settings
4439  *  @hw: pointer to the HW structure
4440  *
4441  *  Determines which flow control settings to use, then configures flow
4442  *  control.  Calls the appropriate media-specific link configuration
4443  *  function.  Assuming the adapter has a valid link partner, a valid link
4444  *  should be established.  Assumes the hardware has previously been reset
4445  *  and the transmitter and receiver are not enabled.
4446  **/
4447 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
4448 {
4449 	s32 ret_val;
4450 
4451 	DEBUGFUNC("e1000_setup_link_ich8lan");
4452 
4453 	if (hw->phy.ops.check_reset_block(hw))
4454 		return E1000_SUCCESS;
4455 
4456 	/* ICH parts do not have a word in the NVM to determine
4457 	 * the default flow control setting, so we explicitly
4458 	 * set it to full.
4459 	 */
4460 	if (hw->fc.requested_mode == e1000_fc_default)
4461 		hw->fc.requested_mode = e1000_fc_full;
4462 
4463 	/* Save off the requested flow control mode for use later.  Depending
4464 	 * on the link partner's capabilities, we may or may not use this mode.
4465 	 */
4466 	hw->fc.current_mode = hw->fc.requested_mode;
4467 
4468 	DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
4469 		hw->fc.current_mode);
4470 
4471 	/* Continue to configure the copper link. */
4472 	ret_val = hw->mac.ops.setup_physical_interface(hw);
4473 	if (ret_val)
4474 		return ret_val;
4475 
4476 	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
4477 	if ((hw->phy.type == e1000_phy_82578) ||
4478 	    (hw->phy.type == e1000_phy_82579) ||
4479 	    (hw->phy.type == e1000_phy_i217) ||
4480 	    (hw->phy.type == e1000_phy_82577)) {
4481 		E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
4482 
4483 		ret_val = hw->phy.ops.write_reg(hw,
4484 					     PHY_REG(BM_PORT_CTRL_PAGE, 27),
4485 					     hw->fc.pause_time);
4486 		if (ret_val)
4487 			return ret_val;
4488 	}
4489 
4490 	return e1000_set_fc_watermarks_generic(hw);
4491 }
4492 
4493 /**
4494  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
4495  *  @hw: pointer to the HW structure
4496  *
4497  *  Configures the kumeran interface to the PHY to wait the appropriate time
4498  *  when polling the PHY, then call the generic setup_copper_link to finish
4499  *  configuring the copper link.
4500  **/
4501 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
4502 {
4503 	u32 ctrl;
4504 	s32 ret_val;
4505 	u16 reg_data;
4506 
4507 	DEBUGFUNC("e1000_setup_copper_link_ich8lan");
4508 
4509 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
4510 	ctrl |= E1000_CTRL_SLU;
4511 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4512 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4513 
4514 	/* Set the mac to wait the maximum time between each iteration
4515 	 * and increase the max iterations when polling the phy;
4516 	 * this fixes erroneous timeouts at 10Mbps.
4517 	 */
4518 	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
4519 					       0xFFFF);
4520 	if (ret_val)
4521 		return ret_val;
4522 	ret_val = e1000_read_kmrn_reg_generic(hw,
4523 					      E1000_KMRNCTRLSTA_INBAND_PARAM,
4524 					      &reg_data);
4525 	if (ret_val)
4526 		return ret_val;
4527 	reg_data |= 0x3F;
4528 	ret_val = e1000_write_kmrn_reg_generic(hw,
4529 					       E1000_KMRNCTRLSTA_INBAND_PARAM,
4530 					       reg_data);
4531 	if (ret_val)
4532 		return ret_val;
4533 
4534 	switch (hw->phy.type) {
4535 	case e1000_phy_igp_3:
4536 		ret_val = e1000_copper_link_setup_igp(hw);
4537 		if (ret_val)
4538 			return ret_val;
4539 		break;
4540 	case e1000_phy_bm:
4541 	case e1000_phy_82578:
4542 		ret_val = e1000_copper_link_setup_m88(hw);
4543 		if (ret_val)
4544 			return ret_val;
4545 		break;
4546 	case e1000_phy_82577:
4547 	case e1000_phy_82579:
4548 		ret_val = e1000_copper_link_setup_82577(hw);
4549 		if (ret_val)
4550 			return ret_val;
4551 		break;
4552 	case e1000_phy_ife:
4553 		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
4554 					       &reg_data);
4555 		if (ret_val)
4556 			return ret_val;
4557 
4558 		reg_data &= ~IFE_PMC_AUTO_MDIX;
4559 
4560 		switch (hw->phy.mdix) {
4561 		case 1:
4562 			reg_data &= ~IFE_PMC_FORCE_MDIX;
4563 			break;
4564 		case 2:
4565 			reg_data |= IFE_PMC_FORCE_MDIX;
4566 			break;
4567 		case 0:
4568 		default:
4569 			reg_data |= IFE_PMC_AUTO_MDIX;
4570 			break;
4571 		}
4572 		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
4573 						reg_data);
4574 		if (ret_val)
4575 			return ret_val;
4576 		break;
4577 	default:
4578 		break;
4579 	}
4580 
4581 	return e1000_setup_copper_link_generic(hw);
4582 }
4583 
4584 /**
4585  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
4586  *  @hw: pointer to the HW structure
4587  *
4588  *  Calls the PHY specific link setup function and then calls the
4589  *  generic setup_copper_link to finish configuring the link for
4590  *  Lynxpoint PCH devices
4591  **/
4592 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
4593 {
4594 	u32 ctrl;
4595 	s32 ret_val;
4596 
4597 	DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
4598 
4599 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
4600 	ctrl |= E1000_CTRL_SLU;
4601 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4602 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4603 
4604 	ret_val = e1000_copper_link_setup_82577(hw);
4605 	if (ret_val)
4606 		return ret_val;
4607 
4608 	return e1000_setup_copper_link_generic(hw);
4609 }
4610 
4611 /**
4612  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
4613  *  @hw: pointer to the HW structure
4614  *  @speed: pointer to store current link speed
4615  *  @duplex: pointer to store the current link duplex
4616  *
4617  *  Calls the generic get_speed_and_duplex to retrieve the current link
4618  *  information and then calls the Kumeran lock loss workaround for links at
4619  *  gigabit speeds.
4620  **/
4621 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
4622 					  u16 *duplex)
4623 {
4624 	s32 ret_val;
4625 
4626 	DEBUGFUNC("e1000_get_link_up_info_ich8lan");
4627 
4628 	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
4629 	if (ret_val)
4630 		return ret_val;
4631 
4632 	if ((hw->mac.type == e1000_ich8lan) &&
4633 	    (hw->phy.type == e1000_phy_igp_3) &&
4634 	    (*speed == SPEED_1000)) {
4635 		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
4636 	}
4637 
4638 	return ret_val;
4639 }
4640 
4641 /**
4642  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
4643  *  @hw: pointer to the HW structure
4644  *
4645  *  Work-around for 82566 Kumeran PCS lock loss:
4646  *  On a link status change (e.g. PCI reset, speed change), when the link is
4647  *  up at gigabit speed:
4648  *    0) if workaround is optionally disabled do nothing
4649  *    1) wait 1ms for Kumeran link to come up
4650  *    2) check Kumeran Diagnostic register PCS lock loss bit
4651  *    3) if not set the link is locked (all is good), otherwise...
4652  *    4) reset the PHY
4653  *    5) repeat up to 10 times
4654  *  Note: this is only called for IGP3 copper when speed is 1 Gb/s.
4655  **/
4656 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
4657 {
4658 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4659 	u32 phy_ctrl;
4660 	s32 ret_val;
4661 	u16 i, data;
4662 	bool link;
4663 
4664 	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
4665 
4666 	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
4667 		return E1000_SUCCESS;
4668 
4669 	/* Make sure link is up before proceeding.  If not, just return.
4670 	 * Attempting this while the link is negotiating fouls up link
4671 	 * stability.
4672 	 */
4673 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
4674 	if (!link)
4675 		return E1000_SUCCESS;
4676 
4677 	for (i = 0; i < 10; i++) {
4678 		/* read once to clear */
4679 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4680 		if (ret_val)
4681 			return ret_val;
4682 		/* and again to get new status */
4683 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4684 		if (ret_val)
4685 			return ret_val;
4686 
4687 		/* check for PCS lock */
4688 		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
4689 			return E1000_SUCCESS;
4690 
4691 		/* Issue PHY reset */
4692 		hw->phy.ops.reset(hw);
4693 		msec_delay_irq(5);
4694 	}
4695 	/* Disable GigE link negotiation */
4696 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4697 	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
4698 		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4699 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4700 
4701 	/* Call gig speed drop workaround on Gig disable before accessing
4702 	 * any PHY registers
4703 	 */
4704 	e1000_gig_downshift_workaround_ich8lan(hw);
4705 
4706 	/* unable to acquire PCS lock */
4707 	return -E1000_ERR_PHY;
4708 }
4709 
4710 /**
4711  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
4712  *  @hw: pointer to the HW structure
4713  *  @state: boolean value used to set the current Kumeran workaround state
4714  *
4715  *  If ICH8, set the current Kumeran workaround state (enabled - TRUE
4716  *  /disabled - FALSE).
4717  **/
4718 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
4719 						 bool state)
4720 {
4721 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4722 
4723 	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
4724 
4725 	if (hw->mac.type != e1000_ich8lan) {
4726 		DEBUGOUT("Workaround applies to ICH8 only.\n");
4727 		return;
4728 	}
4729 
4730 	dev_spec->kmrn_lock_loss_workaround_enabled = state;
4731 
4732 	return;
4733 }
4734 
4735 /**
4736  *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
4737  *  @hw: pointer to the HW structure
4738  *
4739  *  Workaround for 82566 power-down on D3 entry:
4740  *    1) disable gigabit link
4741  *    2) write VR power-down enable
4742  *    3) read it back
4743  *  Continue if successful, else issue LCD reset and repeat
4744  **/
4745 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
4746 {
4747 	u32 reg;
4748 	u16 data;
4749 	u8  retry = 0;
4750 
4751 	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
4752 
4753 	if (hw->phy.type != e1000_phy_igp_3)
4754 		return;
4755 
4756 	/* Try the workaround twice (if needed) */
4757 	do {
4758 		/* Disable link */
4759 		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
4760 		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
4761 			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4762 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
4763 
4764 		/* Call gig speed drop workaround on Gig disable before
4765 		 * accessing any PHY registers
4766 		 */
4767 		if (hw->mac.type == e1000_ich8lan)
4768 			e1000_gig_downshift_workaround_ich8lan(hw);
4769 
4770 		/* Write VR power-down enable */
4771 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4772 		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4773 		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
4774 				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);
4775 
4776 		/* Read it back and test */
4777 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4778 		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4779 		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
4780 			break;
4781 
4782 		/* Issue PHY reset and repeat at most one more time */
4783 		reg = E1000_READ_REG(hw, E1000_CTRL);
4784 		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
4785 		retry++;
4786 	} while (retry);
4787 }
4788 
4789 /**
4790  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
4791  *  @hw: pointer to the HW structure
4792  *
4793  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
4794  *  LPLU, Gig disable, MDIC PHY reset):
4795  *    1) Set Kumeran Near-end loopback
4796  *    2) Clear Kumeran Near-end loopback
4797  *  Should only be called for ICH8[m] devices with any 1G Phy.
4798  **/
4799 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
4800 {
4801 	s32 ret_val;
4802 	u16 reg_data;
4803 
4804 	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
4805 
4806 	if ((hw->mac.type != e1000_ich8lan) ||
4807 	    (hw->phy.type == e1000_phy_ife))
4808 		return;
4809 
4810 	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4811 					      &reg_data);
4812 	if (ret_val)
4813 		return;
4814 	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
4815 	ret_val = e1000_write_kmrn_reg_generic(hw,
4816 					       E1000_KMRNCTRLSTA_DIAG_OFFSET,
4817 					       reg_data);
4818 	if (ret_val)
4819 		return;
4820 	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
4821 	e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4822 				     reg_data);
4823 }
4824 
4825 /**
4826  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
4827  *  @hw: pointer to the HW structure
4828  *
4829  *  During S0 to Sx transition, it is possible the link remains at gig
4830  *  instead of negotiating to a lower speed.  Before going to Sx, set
4831  *  'Gig Disable' to force link speed negotiation to a lower speed based on
4832  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
4833  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
4834  *  needs to be written.
4835  *  Parts that support (and are linked to a partner which support) EEE in
4836  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
4837  *  than 10Mbps w/o EEE.
4838  **/
4839 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4840 {
4841 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4842 	u32 phy_ctrl;
4843 	s32 ret_val;
4844 
4845 	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
4846 
4847 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4848 	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
4849 
4850 	if (hw->phy.type == e1000_phy_i217) {
4851 		u16 phy_reg, device_id = hw->device_id;
4852 
4853 		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
4854 		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
4855 		    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
4856 		    (device_id == E1000_DEV_ID_PCH_I218_V3)) {
4857 			u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
4858 
4859 			E1000_WRITE_REG(hw, E1000_FEXTNVM6,
4860 					fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
4861 		}
4862 
4863 		ret_val = hw->phy.ops.acquire(hw);
4864 		if (ret_val)
4865 			goto out;
4866 
4867 		if (!dev_spec->eee_disable) {
4868 			u16 eee_advert;
4869 
4870 			ret_val =
4871 			    e1000_read_emi_reg_locked(hw,
4872 						      I217_EEE_ADVERTISEMENT,
4873 						      &eee_advert);
4874 			if (ret_val)
4875 				goto release;
4876 
4877 			/* Disable LPLU if both link partners support 100BaseT
4878 			 * EEE and 100Full is advertised on both ends of the
4879 			 * link, and enable Auto Enable LPI since there will
4880 			 * be no driver to enable LPI while in Sx.
4881 			 */
4882 			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
4883 			    (dev_spec->eee_lp_ability &
4884 			     I82579_EEE_100_SUPPORTED) &&
4885 			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
4886 				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
4887 					      E1000_PHY_CTRL_NOND0A_LPLU);
4888 
4889 				/* Set Auto Enable LPI after link up */
4890 				hw->phy.ops.read_reg_locked(hw,
4891 							    I217_LPI_GPIO_CTRL,
4892 							    &phy_reg);
4893 				phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
4894 				hw->phy.ops.write_reg_locked(hw,
4895 							     I217_LPI_GPIO_CTRL,
4896 							     phy_reg);
4897 			}
4898 		}
4899 
4900 		/* For i217 Intel Rapid Start Technology support,
4901 		 * when the system is going into Sx and no manageability engine
4902 		 * is present, the driver must configure proxy to reset only on
4903 		 * power good.  LPI (Low Power Idle) state must also reset only
4904 		 * on power good, as well as the MTA (Multicast table array).
4905 		 * The SMBus release must also be disabled on LCD reset.
4906 		 */
4907 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
4908 		      E1000_ICH_FWSM_FW_VALID)) {
4909 			/* Enable proxy to reset only on power good. */
4910 			hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
4911 						    &phy_reg);
4912 			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
4913 			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
4914 						     phy_reg);
4915 
4916 			/* Set the bit to enable LPI (EEE) to reset only
4917 			 * on power good.
4918 			 */
4919 			hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
4920 			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
4921 			hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
4922 
4923 			/* Disable the SMB release on LCD reset. */
4924 			hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
4925 			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
4926 			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
4927 		}
4928 
4929 		/* Enable MTA to reset for Intel Rapid Start Technology
4930 		 * Support
4931 		 */
4932 		hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
4933 		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
4934 		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4935 
4936 release:
4937 		hw->phy.ops.release(hw);
4938 	}
4939 out:
4940 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4941 
4942 	if (hw->mac.type == e1000_ich8lan)
4943 		e1000_gig_downshift_workaround_ich8lan(hw);
4944 
4945 	if (hw->mac.type >= e1000_pchlan) {
4946 		e1000_oem_bits_config_ich8lan(hw, FALSE);
4947 
4948 		/* Reset PHY to activate OEM bits on 82577/8 */
4949 		if (hw->mac.type == e1000_pchlan)
4950 			e1000_phy_hw_reset_generic(hw);
4951 
4952 		ret_val = hw->phy.ops.acquire(hw);
4953 		if (ret_val)
4954 			return;
4955 		e1000_write_smbus_addr(hw);
4956 		hw->phy.ops.release(hw);
4957 	}
4958 
4959 	return;
4960 }
4961 
4962 /**
4963  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
4964  *  @hw: pointer to the HW structure
4965  *
4966  *  During Sx to S0 transitions on non-managed devices or managed devices
4967  *  on which PHY resets are not blocked, if the PHY registers cannot be
4968  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
4969  *  the PHY.
4970  *  On i217, setup Intel Rapid Start Technology.
4971  **/
4972 void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
4973 {
4974 	s32 ret_val;
4975 
4976 	DEBUGFUNC("e1000_resume_workarounds_pchlan");
4977 
4978 	if (hw->mac.type < e1000_pch2lan)
4979 		return;
4980 
4981 	ret_val = e1000_init_phy_workarounds_pchlan(hw);
4982 	if (ret_val) {
4983 		DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
4984 		return;
4985 	}
4986 
4987 	/* For i217 Intel Rapid Start Technology support when the system
4988 	 * is transitioning from Sx and no manageability engine is present
4989 	 * configure SMBus to restore on reset, disable proxy, and enable
4990 	 * the reset on MTA (Multicast table array).
4991 	 */
4992 	if (hw->phy.type == e1000_phy_i217) {
4993 		u16 phy_reg;
4994 
4995 		ret_val = hw->phy.ops.acquire(hw);
4996 		if (ret_val) {
4997 			DEBUGOUT("Failed to setup iRST\n");
4998 			return;
4999 		}
5000 
5001 		/* Clear Auto Enable LPI after link up */
5002 		hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
5003 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5004 		hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
5005 
5006 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
5007 		    E1000_ICH_FWSM_FW_VALID)) {
5008 			/* Restore clear on SMB if no manageability engine
5009 			 * is present
5010 			 */
5011 			ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
5012 							      &phy_reg);
5013 			if (ret_val)
5014 				goto release;
5015 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5016 			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5017 
5018 			/* Disable Proxy */
5019 			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
5020 		}
5021 		/* Enable reset on MTA */
5022 		ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
5023 						      &phy_reg);
5024 		if (ret_val)
5025 			goto release;
5026 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5027 		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5028 release:
5029 		if (ret_val)
5030 			DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
5031 		hw->phy.ops.release(hw);
5032 	}
5033 }

/**
 *  e1000_cleanup_led_ich8lan - Restore the default LED operation
 *  @hw: pointer to the HW structure
 *
 *  Return the LED back to the default configuration.
 **/
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_cleanup_led_ich8lan");

	if (hw->phy.type == e1000_phy_ife)
		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
					     0);

	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
	return E1000_SUCCESS;
}
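
/* Note: on parts with the IFE (10/100) PHY the LEDs are driven from the
 * PHY's special control register, while all other parts drive them from
 * the MAC's LEDCTL register; the same split appears in the led_on and
 * led_off handlers below.
 */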

/**
 *  e1000_led_on_ich8lan - Turn LEDs on
 *  @hw: pointer to the HW structure
 *
 *  Turn on the LEDs.
 **/
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_led_on_ich8lan");

	if (hw->phy.type == e1000_phy_ife)
		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));

	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
	return E1000_SUCCESS;
}

/**
 *  e1000_led_off_ich8lan - Turn LEDs off
 *  @hw: pointer to the HW structure
 *
 *  Turn off the LEDs.
 **/
static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_led_off_ich8lan");

	if (hw->phy.type == e1000_phy_ife)
		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
			       (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));

	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
	return E1000_SUCCESS;
}

/**
 *  e1000_setup_led_pchlan - Configures SW controllable LED
 *  @hw: pointer to the HW structure
 *
 *  This prepares the SW controllable LED for use.
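 *  On PCH-based parts the LED configuration lives in the PHY's
 *  HV_LED_CONFIG register rather than in the MAC's LEDCTL register,
 *  which is why these handlers write through phy.ops.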
 **/
static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_setup_led_pchlan");

	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
				     (u16)hw->mac.ledctl_mode1);
}

/**
 *  e1000_cleanup_led_pchlan - Restore the default LED operation
 *  @hw: pointer to the HW structure
 *
 *  Return the LED back to the default configuration.
 **/
static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_cleanup_led_pchlan");

	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
				     (u16)hw->mac.ledctl_default);
}

/**
 *  e1000_led_on_pchlan - Turn LEDs on
 *  @hw: pointer to the HW structure
 *
 *  Turn on the LEDs.
 **/
static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
{
	u16 data = (u16)hw->mac.ledctl_mode2;
	u32 i, led;

	DEBUGFUNC("e1000_led_on_pchlan");

	/* If no link, then turn LED on by setting the invert bit
	 * for each LED whose mode is "link_up" in ledctl_mode2.
	 */
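	/* Layout note (inferred from the masks and shifts used below, not
	 * quoted from the datasheet): HV_LED_CONFIG packs one 5-bit field
	 * per LED, so LED i occupies bits [i*5+4:i*5];
	 * E1000_PHY_LED0_MODE_MASK selects the mode bits within a field
	 * and E1000_PHY_LED0_IVRT is its invert bit.  With no link, an LED
	 * in link_up mode is off, so flipping its invert bit forces it on.
	 */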
	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		for (i = 0; i < 3; i++) {
			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
			if ((led & E1000_PHY_LED0_MODE_MASK) !=
			    E1000_LEDCTL_MODE_LINK_UP)
				continue;
			if (led & E1000_PHY_LED0_IVRT)
				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
			else
				data |= (E1000_PHY_LED0_IVRT << (i * 5));
		}
	}

	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
}

/**
 *  e1000_led_off_pchlan - Turn LEDs off
 *  @hw: pointer to the HW structure
 *
 *  Turn off the LEDs.
 **/
static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
{
	u16 data = (u16)hw->mac.ledctl_mode1;
	u32 i, led;

	DEBUGFUNC("e1000_led_off_pchlan");

	/* If no link, then turn LED off by clearing the invert bit
	 * for each LED whose mode is "link_up" in ledctl_mode1.
	 */
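	/* This is the inverse of e1000_led_on_pchlan(); see the layout
	 * note there for how the per-LED fields are packed.
	 */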
	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		for (i = 0; i < 3; i++) {
			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
			if ((led & E1000_PHY_LED0_MODE_MASK) !=
			    E1000_LEDCTL_MODE_LINK_UP)
				continue;
			if (led & E1000_PHY_LED0_IVRT)
				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
			else
				data |= (E1000_PHY_LED0_IVRT << (i * 5));
		}
	}

	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
}

/**
 *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
 *  @hw: pointer to the HW structure
 *
 *  Read appropriate register for the config done bit for completion status
 *  and configure the PHY through s/w for EEPROM-less parts.
 *
 *  NOTE: some silicon which is EEPROM-less will fail trying to read the
 *  config done bit, so only an error is logged and execution continues.
 *  If we were to return with an error, EEPROM-less silicon would not be
 *  able to be reset or change link.
 **/
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u32 bank = 0;
	u32 status;

	DEBUGFUNC("e1000_get_cfg_done_ich8lan");

	e1000_get_cfg_done_generic(hw);

	/* Wait for indication from h/w that it has completed basic config */
	if (hw->mac.type >= e1000_ich10lan) {
		e1000_lan_init_done_ich8lan(hw);
	} else {
		ret_val = e1000_get_auto_rd_done_generic(hw);
		if (ret_val) {
			/* When the auto config read does not complete, do
			 * not return with an error.  This can happen when
			 * there is no EEPROM, and returning an error would
			 * prevent getting link.
			 */
			DEBUGOUT("Auto Read Done did not complete\n");
			ret_val = E1000_SUCCESS;
		}
	}

	/* Clear PHY Reset Asserted bit */
	status = E1000_READ_REG(hw, E1000_STATUS);
	if (status & E1000_STATUS_PHYRA)
		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
	else
		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");

	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
	if (hw->mac.type <= e1000_ich9lan) {
		if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
		    (hw->phy.type == e1000_phy_igp_3)) {
			e1000_phy_init_script_igp3(hw);
		}
	} else {
		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
			/* Maybe we should do a basic PHY config */
			DEBUGOUT("EEPROM not present\n");
			ret_val = -E1000_ERR_CONFIG;
		}
	}

	return ret_val;
}

/**
 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * Remove the link when the PHY is powered down to save power or to turn
 * off the link during a driver unload, provided Wake on LAN is not enabled.
 **/
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
{
	/* Power down only if neither the management interface is enabled
	 * nor a PHY reset is blocked
	 */
	if (!(hw->mac.ops.check_mng_mode(hw) ||
	      hw->phy.ops.check_reset_block(hw)))
		e1000_power_down_phy_copper(hw);

	return;
}

/**
 *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
 *  @hw: pointer to the HW structure
 *
 *  Clears hardware counters specific to the silicon family and calls
 *  clear_hw_cntrs_generic to clear all general purpose counters.
 **/
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
{
	u16 phy_data;
	s32 ret_val;

	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");

	e1000_clear_hw_cntrs_base_generic(hw);

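	/* These MAC statistics registers are read-to-clear; the reads are
	 * issued purely for their side effect and the values are discarded.
	 */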
	E1000_READ_REG(hw, E1000_ALGNERRC);
	E1000_READ_REG(hw, E1000_RXERRC);
	E1000_READ_REG(hw, E1000_TNCRS);
	E1000_READ_REG(hw, E1000_CEXTERR);
	E1000_READ_REG(hw, E1000_TSCTC);
	E1000_READ_REG(hw, E1000_TSCTFC);

	E1000_READ_REG(hw, E1000_MGTPRC);
	E1000_READ_REG(hw, E1000_MGTPDC);
	E1000_READ_REG(hw, E1000_MGTPTC);

	E1000_READ_REG(hw, E1000_IAC);
	E1000_READ_REG(hw, E1000_ICRXOC);

	/* Clear PHY statistics registers */
	if ((hw->phy.type == e1000_phy_82578) ||
	    (hw->phy.type == e1000_phy_82579) ||
	    (hw->phy.type == e1000_phy_i217) ||
	    (hw->phy.type == e1000_phy_82577)) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return;
		ret_val = hw->phy.ops.set_page(hw,
					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
		if (ret_val)
			goto release;
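		/* The PHY statistics on the HV stats page are likewise
		 * read-to-clear; phy_data is only a scratch destination.
		 * Each counter is split into _UPPER/_LOWER halves, so both
		 * halves are read to clear the whole counter.
		 */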
		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
release:
		hw->phy.ops.release(hw);
	}
}
