xref: /dragonfly/sys/dev/netif/ig_hal/e1000_ich8lan.c (revision 548a3528)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2012, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD:$*/
34 
35 /* 82562G 10/100 Network Connection
36  * 82562G-2 10/100 Network Connection
37  * 82562GT 10/100 Network Connection
38  * 82562GT-2 10/100 Network Connection
39  * 82562V 10/100 Network Connection
40  * 82562V-2 10/100 Network Connection
41  * 82566DC-2 Gigabit Network Connection
42  * 82566DC Gigabit Network Connection
43  * 82566DM-2 Gigabit Network Connection
44  * 82566DM Gigabit Network Connection
45  * 82566MC Gigabit Network Connection
46  * 82566MM Gigabit Network Connection
47  * 82567LM Gigabit Network Connection
48  * 82567LF Gigabit Network Connection
49  * 82567V Gigabit Network Connection
50  * 82567LM-2 Gigabit Network Connection
51  * 82567LF-2 Gigabit Network Connection
52  * 82567V-2 Gigabit Network Connection
53  * 82567LF-3 Gigabit Network Connection
54  * 82567LM-3 Gigabit Network Connection
55  * 82567LM-4 Gigabit Network Connection
56  * 82577LM Gigabit Network Connection
57  * 82577LC Gigabit Network Connection
58  * 82578DM Gigabit Network Connection
59  * 82578DC Gigabit Network Connection
60  * 82579LM Gigabit Network Connection
61  * 82579V Gigabit Network Connection
62  */
63 
64 #include "e1000_api.h"
65 
66 static s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
67 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
68 static s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
69 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
70 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
71 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
72 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
73 static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
74 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
75 					      u8 *mc_addr_list,
76 					      u32 mc_addr_count);
77 static s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
78 static s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
79 static s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
80 static s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
81 					    bool active);
82 static s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
83 					    bool active);
84 static s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
85 				   u16 words, u16 *data);
86 static s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
87 				    u16 words, u16 *data);
88 static s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
89 static s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
90 static s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
91 					    u16 *data);
92 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
93 static s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
94 static s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
95 static s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
96 static s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
97 static s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
98 static s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
99 static s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
100 					   u16 *speed, u16 *duplex);
101 static s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
102 static s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
103 static s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
104 static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
105 static s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
106 static s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
107 static s32  e1000_led_on_pchlan(struct e1000_hw *hw);
108 static s32  e1000_led_off_pchlan(struct e1000_hw *hw);
109 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
110 static s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
111 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
112 static s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
113 static s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
114 					  u32 offset, u8 *data);
115 static s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
116 					  u8 size, u16 *data);
117 static s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
118 					  u32 offset, u16 *data);
119 static s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
120 						 u32 offset, u8 byte);
121 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
122 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
123 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
124 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
125 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
126 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
127 static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr);
128 
129 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
130 /* Offset 04h HSFSTS */
131 union ich8_hws_flash_status {
132 	struct ich8_hsfsts {
133 		u16 flcdone:1; /* bit 0 Flash Cycle Done */
134 		u16 flcerr:1; /* bit 1 Flash Cycle Error */
135 		u16 dael:1; /* bit 2 Direct Access error Log */
136 		u16 berasesz:2; /* bit 4:3 Sector Erase Size */
137 		u16 flcinprog:1; /* bit 5 flash cycle in Progress */
138 		u16 reserved1:2; /* bit 13:6 Reserved */
139 		u16 reserved2:6; /* bit 13:6 Reserved */
140 		u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
141 		u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
142 	} hsf_status;
143 	u16 regval;
144 };
145 
146 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
147 /* Offset 06h FLCTL */
148 union ich8_hws_flash_ctrl {
149 	struct ich8_hsflctl {
150 		u16 flcgo:1;   /* 0 Flash Cycle Go */
151 		u16 flcycle:2;   /* 2:1 Flash Cycle */
152 		u16 reserved:5;   /* 7:3 Reserved  */
153 		u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
154 		u16 flockdn:6;   /* 15:10 Reserved */
155 	} hsf_ctrl;
156 	u16 regval;
157 };
158 
159 /* ICH Flash Region Access Permissions */
160 union ich8_hws_flash_regacc {
161 	struct ich8_flracc {
162 		u32 grra:8; /* 0:7 GbE region Read Access */
163 		u32 grwa:8; /* 8:15 GbE region Write Access */
164 		u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
165 		u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
166 	} hsf_flregacc;
167 	u16 regval;
168 };
169 
170 /**
171  *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
172  *  @hw: pointer to the HW structure
173  *
174  *  Test access to the PHY registers by reading the PHY ID registers.  If
175  *  the PHY ID is already known (e.g. resume path) compare it with known ID,
176  *  otherwise assume the read PHY ID is correct if it is valid.
177  *
178  *  Assumes the sw/fw/hw semaphore is already acquired.
179  **/
180 static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
181 {
182 	u16 phy_reg = 0;
183 	u32 phy_id = 0;
184 	s32 ret_val;
185 	u16 retry_count;
186 
187 	for (retry_count = 0; retry_count < 2; retry_count++) {
188 		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
189 		if (ret_val || (phy_reg == 0xFFFF))
190 			continue;
191 		phy_id = (u32)(phy_reg << 16);
192 
193 		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
194 		if (ret_val || (phy_reg == 0xFFFF)) {
195 			phy_id = 0;
196 			continue;
197 		}
198 		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
199 		break;
200 	}
201 
202 	if (hw->phy.id) {
203 		if  (hw->phy.id == phy_id)
204 			return TRUE;
205 	} else if (phy_id) {
206 		hw->phy.id = phy_id;
207 		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
208 		return TRUE;
209 	}
210 
211 	/* In case the PHY needs to be in mdio slow mode,
212 	 * set slow mode and try to get the PHY id again.
213 	 */
214 	hw->phy.ops.release(hw);
215 	ret_val = e1000_set_mdio_slow_mode_hv(hw);
216 	if (!ret_val)
217 		ret_val = e1000_get_phy_id(hw);
218 	hw->phy.ops.acquire(hw);
219 
220 	return !ret_val;
221 }
222 
223 /**
224  *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
225  *  @hw: pointer to the HW structure
226  *
227  *  Workarounds/flow necessary for PHY initialization during driver load
228  *  and resume paths.
229  **/
230 static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
231 {
232 	u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
233 	s32 ret_val;
234 	u16 phy_reg;
235 
236 	DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
237 
238 	/* Gate automatic PHY configuration by hardware on managed and
239 	 * non-managed 82579 and newer adapters.
240 	 */
241 	e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
242 
243 	ret_val = hw->phy.ops.acquire(hw);
244 	if (ret_val) {
245 		DEBUGOUT("Failed to initialize PHY flow\n");
246 		goto out;
247 	}
248 
249 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
250 	 * inaccessible and resetting the PHY is not blocked, toggle the
251 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
252 	 */
253 	switch (hw->mac.type) {
254 	case e1000_pch_lpt:
255 		if (e1000_phy_is_accessible_pchlan(hw))
256 			break;
257 
258 		/* Before toggling LANPHYPC, see if PHY is accessible by
259 		 * forcing MAC to SMBus mode first.
260 		 */
261 		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
262 		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
263 		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
264 
265 		/* fall-through */
266 	case e1000_pch2lan:
267 		if (e1000_phy_is_accessible_pchlan(hw)) {
268 			if (hw->mac.type == e1000_pch_lpt) {
269 				/* Unforce SMBus mode in PHY */
270 				hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL,
271 							    &phy_reg);
272 				phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
273 				hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL,
274 							     phy_reg);
275 
276 				/* Unforce SMBus mode in MAC */
277 				mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
278 				mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
279 				E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
280 			}
281 			break;
282 		}
283 
284 		/* fall-through */
285 	case e1000_pchlan:
286 		if ((hw->mac.type == e1000_pchlan) &&
287 		    (fwsm & E1000_ICH_FWSM_FW_VALID))
288 			break;
289 
290 		if (hw->phy.ops.check_reset_block(hw)) {
291 			DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
292 			break;
293 		}
294 
295 		DEBUGOUT("Toggling LANPHYPC\n");
296 
297 		/* Set Phy Config Counter to 50msec */
298 		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
299 		mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
300 		mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
301 		E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
302 
303 		if (hw->mac.type == e1000_pch_lpt) {
304 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
305 			 * So ensure that the MAC is also out of SMBus mode
306 			 */
307 			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
308 			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
309 			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
310 		}
311 
312 		/* Toggle LANPHYPC Value bit */
313 		mac_reg = E1000_READ_REG(hw, E1000_CTRL);
314 		mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
315 		mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
316 		E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
317 		E1000_WRITE_FLUSH(hw);
318 		usec_delay(10);
319 		mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
320 		E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
321 		E1000_WRITE_FLUSH(hw);
322 		if (hw->mac.type < e1000_pch_lpt) {
323 			msec_delay(50);
324 		} else {
325 			u16 count = 20;
326 			do {
327 				msec_delay(5);
328 			} while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
329 				   E1000_CTRL_EXT_LPCD) && count--);
330 		}
331 		break;
332 	default:
333 		break;
334 	}
335 
336 	hw->phy.ops.release(hw);
337 
338 	/* Reset the PHY before any access to it.  Doing so, ensures
339 	 * that the PHY is in a known good state before we read/write
340 	 * PHY registers.  The generic reset is sufficient here,
341 	 * because we haven't determined the PHY type yet.
342 	 */
343 	ret_val = e1000_phy_hw_reset_generic(hw);
344 
345 out:
346 	/* Ungate automatic PHY configuration on non-managed 82579 */
347 	if ((hw->mac.type == e1000_pch2lan) &&
348 	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
349 		msec_delay(10);
350 		e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
351 	}
352 
353 	return ret_val;
354 }
355 
356 /**
357  *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
358  *  @hw: pointer to the HW structure
359  *
360  *  Initialize family-specific PHY parameters and function pointers.
361  **/
362 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
363 {
364 	struct e1000_phy_info *phy = &hw->phy;
365 	s32 ret_val;
366 
367 	DEBUGFUNC("e1000_init_phy_params_pchlan");
368 
369 	phy->addr		= 1;
370 	phy->reset_delay_us	= 100;
371 
372 	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
373 	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
374 	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
375 	phy->ops.set_page	= e1000_set_page_igp;
376 	phy->ops.read_reg	= e1000_read_phy_reg_hv;
377 	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
378 	phy->ops.read_reg_page	= e1000_read_phy_reg_page_hv;
379 	phy->ops.release	= e1000_release_swflag_ich8lan;
380 	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
381 	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
382 	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
383 	phy->ops.write_reg	= e1000_write_phy_reg_hv;
384 	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
385 	phy->ops.write_reg_page	= e1000_write_phy_reg_page_hv;
386 	phy->ops.power_up	= e1000_power_up_phy_copper;
387 	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;
388 	phy->autoneg_mask	= AUTONEG_ADVERTISE_SPEED_DEFAULT;
389 
390 	phy->id = e1000_phy_unknown;
391 
392 	ret_val = e1000_init_phy_workarounds_pchlan(hw);
393 	if (ret_val)
394 		return ret_val;
395 
396 	if (phy->id == e1000_phy_unknown)
397 		switch (hw->mac.type) {
398 		default:
399 			ret_val = e1000_get_phy_id(hw);
400 			if (ret_val)
401 				return ret_val;
402 			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
403 				break;
404 			/* fall-through */
405 		case e1000_pch2lan:
406 		case e1000_pch_lpt:
407 			/* In case the PHY needs to be in mdio slow mode,
408 			 * set slow mode and try to get the PHY id again.
409 			 */
410 			ret_val = e1000_set_mdio_slow_mode_hv(hw);
411 			if (ret_val)
412 				return ret_val;
413 			ret_val = e1000_get_phy_id(hw);
414 			if (ret_val)
415 				return ret_val;
416 			break;
417 		}
418 	phy->type = e1000_get_phy_type_from_id(phy->id);
419 
420 	switch (phy->type) {
421 	case e1000_phy_82577:
422 	case e1000_phy_82579:
423 	case e1000_phy_i217:
424 		phy->ops.check_polarity = e1000_check_polarity_82577;
425 		phy->ops.force_speed_duplex =
426 			e1000_phy_force_speed_duplex_82577;
427 		phy->ops.get_cable_length = e1000_get_cable_length_82577;
428 		phy->ops.get_info = e1000_get_phy_info_82577;
429 		phy->ops.commit = e1000_phy_sw_reset_generic;
430 		break;
431 	case e1000_phy_82578:
432 		phy->ops.check_polarity = e1000_check_polarity_m88;
433 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
434 		phy->ops.get_cable_length = e1000_get_cable_length_m88;
435 		phy->ops.get_info = e1000_get_phy_info_m88;
436 		break;
437 	default:
438 		ret_val = -E1000_ERR_PHY;
439 		break;
440 	}
441 
442 	return ret_val;
443 }
444 
445 /**
446  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
447  *  @hw: pointer to the HW structure
448  *
449  *  Initialize family-specific PHY parameters and function pointers.
450  **/
451 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
452 {
453 	struct e1000_phy_info *phy = &hw->phy;
454 	s32 ret_val;
455 	u16 i = 0;
456 
457 	DEBUGFUNC("e1000_init_phy_params_ich8lan");
458 
459 	phy->addr		= 1;
460 	phy->reset_delay_us	= 100;
461 
462 	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
463 	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
464 	phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
465 	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
466 	phy->ops.read_reg	= e1000_read_phy_reg_igp;
467 	phy->ops.release	= e1000_release_swflag_ich8lan;
468 	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
469 	phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
470 	phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
471 	phy->ops.write_reg	= e1000_write_phy_reg_igp;
472 	phy->ops.power_up	= e1000_power_up_phy_copper;
473 	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;
474 
475 	/* We may need to do this twice - once for IGP and if that fails,
476 	 * we'll set BM func pointers and try again
477 	 */
478 	ret_val = e1000_determine_phy_address(hw);
479 	if (ret_val) {
480 		phy->ops.write_reg = e1000_write_phy_reg_bm;
481 		phy->ops.read_reg  = e1000_read_phy_reg_bm;
482 		ret_val = e1000_determine_phy_address(hw);
483 		if (ret_val) {
484 			DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
485 			return ret_val;
486 		}
487 	}
488 
489 	phy->id = 0;
490 	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
491 	       (i++ < 100)) {
492 		msec_delay(1);
493 		ret_val = e1000_get_phy_id(hw);
494 		if (ret_val)
495 			return ret_val;
496 	}
497 
498 	/* Verify phy id */
499 	switch (phy->id) {
500 	case IGP03E1000_E_PHY_ID:
501 		phy->type = e1000_phy_igp_3;
502 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
503 		phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
504 		phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
505 		phy->ops.get_info = e1000_get_phy_info_igp;
506 		phy->ops.check_polarity = e1000_check_polarity_igp;
507 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
508 		break;
509 	case IFE_E_PHY_ID:
510 	case IFE_PLUS_E_PHY_ID:
511 	case IFE_C_E_PHY_ID:
512 		phy->type = e1000_phy_ife;
513 		phy->autoneg_mask = E1000_ALL_NOT_GIG;
514 		phy->ops.get_info = e1000_get_phy_info_ife;
515 		phy->ops.check_polarity = e1000_check_polarity_ife;
516 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
517 		break;
518 	case BME1000_E_PHY_ID:
519 		phy->type = e1000_phy_bm;
520 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
521 		phy->ops.read_reg = e1000_read_phy_reg_bm;
522 		phy->ops.write_reg = e1000_write_phy_reg_bm;
523 		phy->ops.commit = e1000_phy_sw_reset_generic;
524 		phy->ops.get_info = e1000_get_phy_info_m88;
525 		phy->ops.check_polarity = e1000_check_polarity_m88;
526 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
527 		break;
528 	default:
529 		return -E1000_ERR_PHY;
530 		break;
531 	}
532 
533 	return E1000_SUCCESS;
534 }
535 
536 /**
537  *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
538  *  @hw: pointer to the HW structure
539  *
540  *  Initialize family-specific NVM parameters and function
541  *  pointers.
542  **/
543 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
544 {
545 	struct e1000_nvm_info *nvm = &hw->nvm;
546 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
547 	u32 gfpreg, sector_base_addr, sector_end_addr;
548 	u16 i;
549 
550 	DEBUGFUNC("e1000_init_nvm_params_ich8lan");
551 
552 	/* Can't read flash registers if the register set isn't mapped. */
553 	if (!hw->flash_address) {
554 		DEBUGOUT("ERROR: Flash registers not mapped\n");
555 		return -E1000_ERR_CONFIG;
556 	}
557 
558 	nvm->type = e1000_nvm_flash_sw;
559 
560 	gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
561 
562 	/* sector_X_addr is a "sector"-aligned address (4096 bytes)
563 	 * Add 1 to sector_end_addr since this sector is included in
564 	 * the overall size.
565 	 */
566 	sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
567 	sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
568 
569 	/* flash_base_addr is byte-aligned */
570 	nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
571 
572 	/* find total size of the NVM, then cut in half since the total
573 	 * size represents two separate NVM banks.
574 	 */
575 	nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
576 				<< FLASH_SECTOR_ADDR_SHIFT;
577 	nvm->flash_bank_size /= 2;
578 	/* Adjust to word count */
579 	nvm->flash_bank_size /= sizeof(u16);
580 
581 	nvm->word_size = E1000_SHADOW_RAM_WORDS;
582 
583 	/* Clear shadow ram */
584 	for (i = 0; i < nvm->word_size; i++) {
585 		dev_spec->shadow_ram[i].modified = FALSE;
586 		dev_spec->shadow_ram[i].value    = 0xFFFF;
587 	}
588 
589 	/* Function Pointers */
590 	nvm->ops.acquire	= e1000_acquire_nvm_ich8lan;
591 	nvm->ops.release	= e1000_release_nvm_ich8lan;
592 	nvm->ops.read		= e1000_read_nvm_ich8lan;
593 	nvm->ops.update		= e1000_update_nvm_checksum_ich8lan;
594 	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
595 	nvm->ops.validate	= e1000_validate_nvm_checksum_ich8lan;
596 	nvm->ops.write		= e1000_write_nvm_ich8lan;
597 
598 	return E1000_SUCCESS;
599 }
600 
601 /**
602  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
603  *  @hw: pointer to the HW structure
604  *
605  *  Initialize family-specific MAC parameters and function
606  *  pointers.
607  **/
608 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
609 {
610 	struct e1000_mac_info *mac = &hw->mac;
611 	u16 pci_cfg;
612 
613 	DEBUGFUNC("e1000_init_mac_params_ich8lan");
614 
615 	/* Set media type function pointer */
616 	hw->phy.media_type = e1000_media_type_copper;
617 
618 	/* Set mta register count */
619 	mac->mta_reg_count = 32;
620 	/* Set rar entry count */
621 	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
622 	if (mac->type == e1000_ich8lan)
623 		mac->rar_entry_count--;
624 	/* Set if part includes ASF firmware */
625 	mac->asf_firmware_present = TRUE;
626 	/* FWSM register */
627 	mac->has_fwsm = TRUE;
628 	/* ARC subsystem not supported */
629 	mac->arc_subsystem_valid = FALSE;
630 	/* Adaptive IFS supported */
631 	mac->adaptive_ifs = TRUE;
632 
633 	/* Function pointers */
634 
635 	/* bus type/speed/width */
636 	mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
637 	/* function id */
638 	mac->ops.set_lan_id = e1000_set_lan_id_single_port;
639 	/* reset */
640 	mac->ops.reset_hw = e1000_reset_hw_ich8lan;
641 	/* hw initialization */
642 	mac->ops.init_hw = e1000_init_hw_ich8lan;
643 	/* link setup */
644 	mac->ops.setup_link = e1000_setup_link_ich8lan;
645 	/* physical interface setup */
646 	mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
647 	/* check for link */
648 	mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
649 	/* link info */
650 	mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
651 	/* multicast address update */
652 	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
653 	/* clear hardware counters */
654 	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
655 
656 	/* LED and other operations */
657 	switch (mac->type) {
658 	case e1000_ich8lan:
659 	case e1000_ich9lan:
660 	case e1000_ich10lan:
661 		/* check management mode */
662 		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
663 		/* ID LED init */
664 		mac->ops.id_led_init = e1000_id_led_init_generic;
665 		/* blink LED */
666 		mac->ops.blink_led = e1000_blink_led_generic;
667 		/* setup LED */
668 		mac->ops.setup_led = e1000_setup_led_generic;
669 		/* cleanup LED */
670 		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
671 		/* turn on/off LED */
672 		mac->ops.led_on = e1000_led_on_ich8lan;
673 		mac->ops.led_off = e1000_led_off_ich8lan;
674 		break;
675 	case e1000_pch2lan:
676 		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
677 		mac->ops.rar_set = e1000_rar_set_pch2lan;
678 		/* fall-through */
679 	case e1000_pch_lpt:
680 		/* multicast address update for pch2 */
681 		mac->ops.update_mc_addr_list =
682 			e1000_update_mc_addr_list_pch2lan;
683 	case e1000_pchlan:
684 		/* save PCH revision_id */
685 		e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
686 		hw->revision_id = (u8)(pci_cfg &= 0x000F);
687 		/* check management mode */
688 		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
689 		/* ID LED init */
690 		mac->ops.id_led_init = e1000_id_led_init_pchlan;
691 		/* setup LED */
692 		mac->ops.setup_led = e1000_setup_led_pchlan;
693 		/* cleanup LED */
694 		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
695 		/* turn on/off LED */
696 		mac->ops.led_on = e1000_led_on_pchlan;
697 		mac->ops.led_off = e1000_led_off_pchlan;
698 		break;
699 	default:
700 		break;
701 	}
702 
703 	if (mac->type == e1000_pch_lpt) {
704 		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
705 		mac->ops.rar_set = e1000_rar_set_pch_lpt;
706 		mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
707 		mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt;
708 	}
709 
710 	/* Enable PCS Lock-loss workaround for ICH8 */
711 	if (mac->type == e1000_ich8lan)
712 		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
713 
714 	return E1000_SUCCESS;
715 }
716 
717 /**
718  *  __e1000_access_emi_reg_locked - Read/write EMI register
719  *  @hw: pointer to the HW structure
720  *  @addr: EMI address to program
721  *  @data: pointer to value to read/write from/to the EMI address
722  *  @read: boolean flag to indicate read or write
723  *
724  *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
725  **/
726 static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
727 					 u16 *data, bool read)
728 {
729 	s32 ret_val;
730 
731 	DEBUGFUNC("__e1000_access_emi_reg_locked");
732 
733 	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
734 	if (ret_val)
735 		return ret_val;
736 
737 	if (read)
738 		ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
739 						      data);
740 	else
741 		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
742 						       *data);
743 
744 	return ret_val;
745 }
746 
747 /**
748  *  e1000_read_emi_reg_locked - Read Extended Management Interface register
749  *  @hw: pointer to the HW structure
750  *  @addr: EMI address to program
751  *  @data: value to be read from the EMI address
752  *
753  *  Assumes the SW/FW/HW Semaphore is already acquired.
754  **/
755 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
756 {
757 	DEBUGFUNC("e1000_read_emi_reg_locked");
758 
759 	return __e1000_access_emi_reg_locked(hw, addr, data, TRUE);
760 }
761 
762 /**
763  *  e1000_write_emi_reg_locked - Write Extended Management Interface register
764  *  @hw: pointer to the HW structure
765  *  @addr: EMI address to program
766  *  @data: value to be written to the EMI address
767  *
768  *  Assumes the SW/FW/HW Semaphore is already acquired.
769  **/
770 static s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
771 {
772 	DEBUGFUNC("e1000_read_emi_reg_locked");
773 
774 	return __e1000_access_emi_reg_locked(hw, addr, &data, FALSE);
775 }
776 
777 /**
778  *  e1000_set_eee_pchlan - Enable/disable EEE support
779  *  @hw: pointer to the HW structure
780  *
781  *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
782  *  the link and the EEE capabilities of the link partner.  The LPI Control
783  *  register bits will remain set only if/when link is up.
784  **/
785 static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
786 {
787 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
788 	s32 ret_val;
789 	u16 lpi_ctrl;
790 
791 	DEBUGFUNC("e1000_set_eee_pchlan");
792 
793 	if ((hw->phy.type != e1000_phy_82579) &&
794 	    (hw->phy.type != e1000_phy_i217))
795 		return E1000_SUCCESS;
796 
797 	ret_val = hw->phy.ops.acquire(hw);
798 	if (ret_val)
799 		return ret_val;
800 
801 	ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
802 	if (ret_val)
803 		goto release;
804 
805 	/* Clear bits that enable EEE in various speeds */
806 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
807 
808 	/* Enable EEE if not disabled by user */
809 	if (!dev_spec->eee_disable) {
810 		u16 lpa, pcs_status, data;
811 
812 		/* Save off link partner's EEE ability */
813 		switch (hw->phy.type) {
814 		case e1000_phy_82579:
815 			lpa = I82579_EEE_LP_ABILITY;
816 			pcs_status = I82579_EEE_PCS_STATUS;
817 			break;
818 		case e1000_phy_i217:
819 			lpa = I217_EEE_LP_ABILITY;
820 			pcs_status = I217_EEE_PCS_STATUS;
821 			break;
822 		default:
823 			ret_val = -E1000_ERR_PHY;
824 			goto release;
825 		}
826 		ret_val = e1000_read_emi_reg_locked(hw, lpa,
827 						    &dev_spec->eee_lp_ability);
828 		if (ret_val)
829 			goto release;
830 
831 		/* Enable EEE only for speeds in which the link partner is
832 		 * EEE capable.
833 		 */
834 		if (dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
835 			lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
836 
837 		if (dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
838 			hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
839 			if (data & NWAY_LPAR_100TX_FD_CAPS)
840 				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
841 			else
842 				/* EEE is not supported in 100Half, so ignore
843 				 * partner's EEE in 100 ability if full-duplex
844 				 * is not advertised.
845 				 */
846 				dev_spec->eee_lp_ability &=
847 				    ~I82579_EEE_100_SUPPORTED;
848 		}
849 
850 		/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
851 		ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
852 		if (ret_val)
853 			goto release;
854 	}
855 
856 	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
857 release:
858 	hw->phy.ops.release(hw);
859 
860 	return ret_val;
861 }
862 
863 /**
864  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
865  *  @hw:   pointer to the HW structure
866  *  @link: link up bool flag
867  *
868  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
869  *  preventing further DMA write requests.  Workaround the issue by disabling
870  *  the de-assertion of the clock request when in 1Gpbs mode.
871  **/
872 static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
873 {
874 	u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
875 	s32 ret_val = E1000_SUCCESS;
876 
877 	if (link && (E1000_READ_REG(hw, E1000_STATUS) &
878 		     E1000_STATUS_SPEED_1000)) {
879 		u16 kmrn_reg;
880 
881 		ret_val = hw->phy.ops.acquire(hw);
882 		if (ret_val)
883 			return ret_val;
884 
885 		ret_val =
886 		    e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
887 					       &kmrn_reg);
888 		if (ret_val)
889 			goto release;
890 
891 		ret_val =
892 		    e1000_write_kmrn_reg_locked(hw,
893 						E1000_KMRNCTRLSTA_K1_CONFIG,
894 						kmrn_reg &
895 						~E1000_KMRNCTRLSTA_K1_ENABLE);
896 		if (ret_val)
897 			goto release;
898 
899 		usec_delay(10);
900 
901 		E1000_WRITE_REG(hw, E1000_FEXTNVM6,
902 				fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
903 
904 		ret_val =
905 		    e1000_write_kmrn_reg_locked(hw,
906 						E1000_KMRNCTRLSTA_K1_CONFIG,
907 						kmrn_reg);
908 release:
909 		hw->phy.ops.release(hw);
910 	} else {
911 		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
912 		E1000_WRITE_REG(hw, E1000_FEXTNVM6,
913 				fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
914 	}
915 
916 	return ret_val;
917 }
918 
919 static u64 e1000_ltr2ns(u16 ltr)
920 {
921 	u32 value, scale;
922 
923 	/* Determine the latency in nsec based on the LTR value & scale */
924 	value = ltr & E1000_LTRV_VALUE_MASK;
925 	scale = (ltr & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT;
926 
927 	return value * (1 << (scale * E1000_LTRV_SCALE_FACTOR));
928 }
929 
930 /**
931  *  e1000_platform_pm_pch_lpt - Set platform power management values
932  *  @hw: pointer to the HW structure
933  *  @link: bool indicating link status
934  *
935  *  Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
936  *  GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
937  *  when link is up (which must not exceed the maximum latency supported
938  *  by the platform), otherwise specify there is no LTR requirement.
939  *  Unlike TRUE-PCIe devices which set the LTR maximum snoop/no-snoop
940  *  latencies in the LTR Extended Capability Structure in the PCIe Extended
941  *  Capability register set, on this device LTR is set by writing the
942  *  equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
943  *  set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
944  *  message to the PMC.
945  *
946  *  Use the LTR value to calculate the Optimized Buffer Flush/Fill (OBFF)
947  *  high-water mark.
948  **/
949 static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
950 {
951 	u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
952 		  link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
953 	u16 lat_enc = 0;	/* latency encoded */
954 	s32 obff_hwm = 0;
955 
956 	DEBUGFUNC("e1000_platform_pm_pch_lpt");
957 
958 	if (link) {
959 		u16 speed, duplex, scale = 0;
960 		u16 max_snoop, max_nosnoop;
961 		u16 max_ltr_enc;	/* max LTR latency encoded */
962 		s64 lat_ns;		/* latency (ns) */
963 		s64 value;
964 		u32 rxa;
965 
966 		if (!hw->mac.max_frame_size) {
967 			DEBUGOUT("max_frame_size not set.\n");
968 			return -E1000_ERR_CONFIG;
969 		}
970 
971 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
972 		if (!speed) {
973 			DEBUGOUT("Speed not set.\n");
974 			return -E1000_ERR_CONFIG;
975 		}
976 
977 		/* Rx Packet Buffer Allocation size (KB) */
978 		rxa = E1000_READ_REG(hw, E1000_PBA) & E1000_PBA_RXA_MASK;
979 
980 		/* Determine the maximum latency tolerated by the device.
981 		 *
982 		 * Per the PCIe spec, the tolerated latencies are encoded as
983 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
984 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
985 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
986 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
987 		 */
988 		lat_ns = ((s64)rxa * 1024 -
989 			  (2 * (s64)hw->mac.max_frame_size)) * 8 * 1000;
990 		if (lat_ns < 0)
991 			lat_ns = 0;
992 		else
993 			lat_ns /= speed;
994 
995 		value = lat_ns;
996 		while (value > E1000_LTRV_VALUE_MASK) {
997 			scale++;
998 			value = E1000_DIVIDE_ROUND_UP(value, (1 << 5));
999 		}
1000 		if (scale > E1000_LTRV_SCALE_MAX) {
1001 			DEBUGOUT1("Invalid LTR latency scale %d\n", scale);
1002 			return -E1000_ERR_CONFIG;
1003 		}
1004 		lat_enc = (u16)((scale << E1000_LTRV_SCALE_SHIFT) | value);
1005 
1006 		/* Determine the maximum latency tolerated by the platform */
1007 		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT, &max_snoop);
1008 		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
1009 		max_ltr_enc = E1000_MAX(max_snoop, max_nosnoop);
1010 
1011 		if (lat_enc > max_ltr_enc) {
1012 			lat_enc = max_ltr_enc;
1013 			lat_ns = e1000_ltr2ns(max_ltr_enc);
1014 		}
1015 
1016 		if (lat_ns) {
1017 			lat_ns *= speed * 1000;
1018 			lat_ns /= 8;
1019 			lat_ns /= 1000000000;
1020 			obff_hwm = (s32)(rxa - lat_ns);
1021 		}
1022 
1023 		if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) {
1024 			DEBUGOUT1("Invalid high water mark %d\n", obff_hwm);
1025 			return -E1000_ERR_CONFIG;
1026 		}
1027 	}
1028 
1029 	/* Set Snoop and No-Snoop latencies the same */
1030 	reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
1031 	E1000_WRITE_REG(hw, E1000_LTRV, reg);
1032 
1033 	/* Set OBFF high water mark */
1034 	reg = E1000_READ_REG(hw, E1000_SVT) & ~E1000_SVT_OFF_HWM_MASK;
1035 	reg |= obff_hwm;
1036 	E1000_WRITE_REG(hw, E1000_SVT, reg);
1037 
1038 	/* Enable OBFF */
1039 	reg = E1000_READ_REG(hw, E1000_SVCR);
1040 	reg |= E1000_SVCR_OFF_EN;
1041 	/* Always unblock interrupts to the CPU even when the system is
1042 	 * in OBFF mode. This ensures that small round-robin traffic
1043 	 * (like ping) does not get dropped or experience long latency.
1044 	 */
1045 	reg |= E1000_SVCR_OFF_MASKINT;
1046 	E1000_WRITE_REG(hw, E1000_SVCR, reg);
1047 
1048 	return E1000_SUCCESS;
1049 }
1050 
1051 /**
1052  *  e1000_set_obff_timer_pch_lpt - Update Optimized Buffer Flush/Fill timer
1053  *  @hw: pointer to the HW structure
1054  *  @itr: interrupt throttling rate
1055  *
1056  *  Configure OBFF with the updated interrupt rate.
1057  **/
1058 static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr)
1059 {
1060 	u32 svcr;
1061 	s32 timer;
1062 
1063 	DEBUGFUNC("e1000_set_obff_timer_pch_lpt");
1064 
1065 	/* Convert ITR value into microseconds for OBFF timer */
1066 	timer = itr & E1000_ITR_MASK;
1067 	timer = (timer * E1000_ITR_MULT) / 1000;
1068 
1069 	if ((timer < 0) || (timer > E1000_ITR_MASK)) {
1070 		DEBUGOUT1("Invalid OBFF timer %d\n", timer);
1071 		return -E1000_ERR_CONFIG;
1072 	}
1073 
1074 	svcr = E1000_READ_REG(hw, E1000_SVCR);
1075 	svcr &= ~E1000_SVCR_OFF_TIMER_MASK;
1076 	svcr |= timer << E1000_SVCR_OFF_TIMER_SHIFT;
1077 	E1000_WRITE_REG(hw, E1000_SVCR, svcr);
1078 
1079 	return E1000_SUCCESS;
1080 }
1081 
1082 /**
1083  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1084  *  @hw: pointer to the HW structure
1085  *
1086  *  Checks to see of the link status of the hardware has changed.  If a
1087  *  change in link status has been detected, then we read the PHY registers
1088  *  to get the current speed/duplex if link exists.
1089  **/
1090 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1091 {
1092 	struct e1000_mac_info *mac = &hw->mac;
1093 	s32 ret_val;
1094 	bool link;
1095 	u16 phy_reg;
1096 
1097 	DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1098 
1099 	/* We only want to go out to the PHY registers to see if Auto-Neg
1100 	 * has completed and/or if our link status has changed.  The
1101 	 * get_link_status flag is set upon receiving a Link Status
1102 	 * Change or Rx Sequence Error interrupt.
1103 	 */
1104 	if (!mac->get_link_status)
1105 		return E1000_SUCCESS;
1106 
1107 	/* First we want to see if the MII Status Register reports
1108 	 * link.  If so, then we want to get the current speed/duplex
1109 	 * of the PHY.
1110 	 */
1111 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1112 	if (ret_val)
1113 		return ret_val;
1114 
1115 	if (hw->mac.type == e1000_pchlan) {
1116 		ret_val = e1000_k1_gig_workaround_hv(hw, link);
1117 		if (ret_val)
1118 			return ret_val;
1119 	}
1120 
1121 	/* When connected at 10Mbps half-duplex, 82579 parts are excessively
1122 	 * aggressive resulting in many collisions. To avoid this, increase
1123 	 * the IPG and reduce Rx latency in the PHY.
1124 	 */
1125 	if ((hw->mac.type == e1000_pch2lan) && link) {
1126 		u32 reg;
1127 		reg = E1000_READ_REG(hw, E1000_STATUS);
1128 		if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
1129 			reg = E1000_READ_REG(hw, E1000_TIPG);
1130 			reg &= ~E1000_TIPG_IPGT_MASK;
1131 			reg |= 0xFF;
1132 			E1000_WRITE_REG(hw, E1000_TIPG, reg);
1133 
1134 			/* Reduce Rx latency in analog PHY */
1135 			ret_val = hw->phy.ops.acquire(hw);
1136 			if (ret_val)
1137 				return ret_val;
1138 
1139 			ret_val = e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0);
1140 
1141 			hw->phy.ops.release(hw);
1142 
1143 			if (ret_val)
1144 				return ret_val;
1145 		}
1146 	}
1147 
1148 	/* Work-around I218 hang issue */
1149 	if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1150 	    (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
1151 		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1152 		if (ret_val)
1153 			return ret_val;
1154 	}
1155 
1156 	if (hw->mac.type == e1000_pch_lpt) {
1157 		/* Set platform power management values for Latency Tolerance
1158 		 * Reporting (LTR) and Optimized Buffer Flush/Fill (OBFF).
1159 		 */
1160 		ret_val = e1000_platform_pm_pch_lpt(hw, link);
1161 		if (ret_val)
1162 			return ret_val;
1163 	}
1164 
1165 	/* Clear link partner's EEE ability */
1166 	hw->dev_spec.ich8lan.eee_lp_ability = 0;
1167 
1168 	if (!link)
1169 		return E1000_SUCCESS; /* No link detected */
1170 
1171 	mac->get_link_status = FALSE;
1172 
1173 	switch (hw->mac.type) {
1174 	case e1000_pch2lan:
1175 		ret_val = e1000_k1_workaround_lv(hw);
1176 		if (ret_val)
1177 			return ret_val;
1178 		/* fall-thru */
1179 	case e1000_pchlan:
1180 		if (hw->phy.type == e1000_phy_82578) {
1181 			ret_val = e1000_link_stall_workaround_hv(hw);
1182 			if (ret_val)
1183 				return ret_val;
1184 		}
1185 
1186 		/* Workaround for PCHx parts in half-duplex:
1187 		 * Set the number of preambles removed from the packet
1188 		 * when it is passed from the PHY to the MAC to prevent
1189 		 * the MAC from misinterpreting the packet type.
1190 		 */
1191 		hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1192 		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1193 
1194 		if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1195 		    E1000_STATUS_FD)
1196 			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1197 
1198 		hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1199 		break;
1200 	default:
1201 		break;
1202 	}
1203 
1204 	/* Check if there was DownShift, must be checked
1205 	 * immediately after link-up
1206 	 */
1207 	e1000_check_downshift_generic(hw);
1208 
1209 	/* Enable/Disable EEE after link up */
1210 	ret_val = e1000_set_eee_pchlan(hw);
1211 	if (ret_val)
1212 		return ret_val;
1213 
1214 	/* If we are forcing speed/duplex, then we simply return since
1215 	 * we have already determined whether we have link or not.
1216 	 */
1217 	if (!mac->autoneg)
1218 		return -E1000_ERR_CONFIG;
1219 
1220 	/* Auto-Neg is enabled.  Auto Speed Detection takes care
1221 	 * of MAC speed/duplex configuration.  So we only need to
1222 	 * configure Collision Distance in the MAC.
1223 	 */
1224 	mac->ops.config_collision_dist(hw);
1225 
1226 	/* Configure Flow Control now that Auto-Neg has completed.
1227 	 * First, we need to restore the desired flow control
1228 	 * settings because we may have had to re-autoneg with a
1229 	 * different link partner.
1230 	 */
1231 	ret_val = e1000_config_fc_after_link_up_generic(hw);
1232 	if (ret_val)
1233 		DEBUGOUT("Error configuring flow control\n");
1234 
1235 	return ret_val;
1236 }
1237 
1238 /**
1239  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1240  *  @hw: pointer to the HW structure
1241  *
1242  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1243  **/
1244 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1245 {
1246 	DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1247 
1248 	hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1249 	hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1250 	switch (hw->mac.type) {
1251 	case e1000_ich8lan:
1252 	case e1000_ich9lan:
1253 	case e1000_ich10lan:
1254 		hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1255 		break;
1256 	case e1000_pchlan:
1257 	case e1000_pch2lan:
1258 	case e1000_pch_lpt:
1259 		hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1260 		break;
1261 	default:
1262 		break;
1263 	}
1264 }
1265 
1266 /**
1267  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1268  *  @hw: pointer to the HW structure
1269  *
1270  *  Acquires the mutex for performing NVM operations.
1271  **/
1272 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1273 {
1274 	DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1275 	return E1000_SUCCESS;
1276 }
1277 
1278 /**
1279  *  e1000_release_nvm_ich8lan - Release NVM mutex
1280  *  @hw: pointer to the HW structure
1281  *
1282  *  Releases the mutex used while performing NVM operations.
1283  **/
1284 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1285 {
1286 	DEBUGFUNC("e1000_release_nvm_ich8lan");
1287 	return;
1288 }
1289 
1290 /**
1291  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1292  *  @hw: pointer to the HW structure
1293  *
1294  *  Acquires the software control flag for performing PHY and select
1295  *  MAC CSR accesses.
1296  **/
1297 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1298 {
1299 	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1300 	s32 ret_val = E1000_SUCCESS;
1301 
1302 	DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1303 
1304 	while (timeout) {
1305 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1306 		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1307 			break;
1308 
1309 		msec_delay_irq(1);
1310 		timeout--;
1311 	}
1312 
1313 	if (!timeout) {
1314 		DEBUGOUT("SW has already locked the resource.\n");
1315 		ret_val = -E1000_ERR_CONFIG;
1316 		goto out;
1317 	}
1318 
1319 	timeout = SW_FLAG_TIMEOUT;
1320 
1321 	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1322 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1323 
1324 	while (timeout) {
1325 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1326 		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1327 			break;
1328 
1329 		msec_delay_irq(1);
1330 		timeout--;
1331 	}
1332 
1333 	if (!timeout) {
1334 		DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
1335 			  E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1336 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1337 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1338 		ret_val = -E1000_ERR_CONFIG;
1339 		goto out;
1340 	}
1341 
1342 out:
1343 	return ret_val;
1344 }
1345 
1346 /**
1347  *  e1000_release_swflag_ich8lan - Release software control flag
1348  *  @hw: pointer to the HW structure
1349  *
1350  *  Releases the software control flag for performing PHY and select
1351  *  MAC CSR accesses.
1352  **/
1353 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1354 {
1355 	u32 extcnf_ctrl;
1356 
1357 	DEBUGFUNC("e1000_release_swflag_ich8lan");
1358 
1359 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1360 
1361 	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1362 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1363 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1364 	} else {
1365 		DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1366 	}
1367 	return;
1368 }
1369 
1370 /**
1371  *  e1000_check_mng_mode_ich8lan - Checks management mode
1372  *  @hw: pointer to the HW structure
1373  *
1374  *  This checks if the adapter has any manageability enabled.
1375  *  This is a function pointer entry point only called by read/write
1376  *  routines for the PHY and NVM parts.
1377  **/
1378 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1379 {
1380 	u32 fwsm;
1381 
1382 	DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1383 
1384 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
1385 
1386 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1387 	       ((fwsm & E1000_FWSM_MODE_MASK) ==
1388 		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1389 }
1390 
1391 /**
1392  *  e1000_check_mng_mode_pchlan - Checks management mode
1393  *  @hw: pointer to the HW structure
1394  *
1395  *  This checks if the adapter has iAMT enabled.
1396  *  This is a function pointer entry point only called by read/write
1397  *  routines for the PHY and NVM parts.
1398  **/
1399 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1400 {
1401 	u32 fwsm;
1402 
1403 	DEBUGFUNC("e1000_check_mng_mode_pchlan");
1404 
1405 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
1406 
1407 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1408 	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1409 }
1410 
1411 /**
1412  *  e1000_rar_set_pch2lan - Set receive address register
1413  *  @hw: pointer to the HW structure
1414  *  @addr: pointer to the receive address
1415  *  @index: receive address array register
1416  *
1417  *  Sets the receive address array register at index to the address passed
1418  *  in by addr.  For 82579, RAR[0] is the base address register that is to
1419  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1420  *  Use SHRA[0-3] in place of those reserved for ME.
1421  **/
1422 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1423 {
1424 	u32 rar_low, rar_high;
1425 
1426 	DEBUGFUNC("e1000_rar_set_pch2lan");
1427 
1428 	/* HW expects these in little endian so we reverse the byte order
1429 	 * from network order (big endian) to little endian
1430 	 */
1431 	rar_low = ((u32) addr[0] |
1432 		   ((u32) addr[1] << 8) |
1433 		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1434 
1435 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1436 
1437 	/* If MAC address zero, no need to set the AV bit */
1438 	if (rar_low || rar_high)
1439 		rar_high |= E1000_RAH_AV;
1440 
1441 	if (index == 0) {
1442 		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1443 		E1000_WRITE_FLUSH(hw);
1444 		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1445 		E1000_WRITE_FLUSH(hw);
1446 		return;
1447 	}
1448 
1449 	if (index < hw->mac.rar_entry_count) {
1450 		s32 ret_val;
1451 
1452 		ret_val = e1000_acquire_swflag_ich8lan(hw);
1453 		if (ret_val)
1454 			goto out;
1455 
1456 		E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
1457 		E1000_WRITE_FLUSH(hw);
1458 		E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
1459 		E1000_WRITE_FLUSH(hw);
1460 
1461 		e1000_release_swflag_ich8lan(hw);
1462 
1463 		/* verify the register updates */
1464 		if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
1465 		    (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
1466 			return;
1467 
1468 		DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1469 			 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
1470 	}
1471 
1472 out:
1473 	DEBUGOUT1("Failed to write receive address at index %d\n", index);
1474 }
1475 
1476 /**
1477  *  e1000_rar_set_pch_lpt - Set receive address registers
1478  *  @hw: pointer to the HW structure
1479  *  @addr: pointer to the receive address
1480  *  @index: receive address array register
1481  *
1482  *  Sets the receive address register array at index to the address passed
1483  *  in by addr. For LPT, RAR[0] is the base address register that is to
1484  *  contain the MAC address. SHRA[0-10] are the shared receive address
1485  *  registers that are shared between the Host and manageability engine (ME).
1486  **/
1487 static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1488 {
1489 	u32 rar_low, rar_high;
1490 	u32 wlock_mac;
1491 
1492 	DEBUGFUNC("e1000_rar_set_pch_lpt");
1493 
1494 	/* HW expects these in little endian so we reverse the byte order
1495 	 * from network order (big endian) to little endian
1496 	 */
1497 	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
1498 		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1499 
1500 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1501 
1502 	/* If MAC address zero, no need to set the AV bit */
1503 	if (rar_low || rar_high)
1504 		rar_high |= E1000_RAH_AV;
1505 
1506 	if (index == 0) {
1507 		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1508 		E1000_WRITE_FLUSH(hw);
1509 		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1510 		E1000_WRITE_FLUSH(hw);
1511 		return;
1512 	}
1513 
1514 	/* The manageability engine (ME) can lock certain SHRAR registers that
1515 	 * it is using - those registers are unavailable for use.
1516 	 */
1517 	if (index < hw->mac.rar_entry_count) {
1518 		wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
1519 			    E1000_FWSM_WLOCK_MAC_MASK;
1520 		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1521 
1522 		/* Check if all SHRAR registers are locked */
1523 		if (wlock_mac == 1)
1524 			goto out;
1525 
1526 		if ((wlock_mac == 0) || (index <= wlock_mac)) {
1527 			s32 ret_val;
1528 
1529 			ret_val = e1000_acquire_swflag_ich8lan(hw);
1530 
1531 			if (ret_val)
1532 				goto out;
1533 
1534 			E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
1535 					rar_low);
1536 			E1000_WRITE_FLUSH(hw);
1537 			E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
1538 					rar_high);
1539 			E1000_WRITE_FLUSH(hw);
1540 
1541 			e1000_release_swflag_ich8lan(hw);
1542 
1543 			/* verify the register updates */
1544 			if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
1545 			    (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
1546 				return;
1547 		}
1548 	}
1549 
1550 out:
1551 	DEBUGOUT1("Failed to write receive address at index %d\n", index);
1552 }
1553 
1554 /**
1555  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
1556  *  @hw: pointer to the HW structure
1557  *  @mc_addr_list: array of multicast addresses to program
1558  *  @mc_addr_count: number of multicast addresses to program
1559  *
1560  *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
1561  *  The caller must have a packed mc_addr_list of multicast addresses.
1562  **/
1563 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
1564 					      u8 *mc_addr_list,
1565 					      u32 mc_addr_count)
1566 {
1567 	u16 phy_reg = 0;
1568 	int i;
1569 	s32 ret_val;
1570 
1571 	DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
1572 
1573 	e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
1574 
1575 	ret_val = hw->phy.ops.acquire(hw);
1576 	if (ret_val)
1577 		return;
1578 
1579 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1580 	if (ret_val)
1581 		goto release;
1582 
1583 	for (i = 0; i < hw->mac.mta_reg_count; i++) {
1584 		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
1585 					   (u16)(hw->mac.mta_shadow[i] &
1586 						 0xFFFF));
1587 		hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
1588 					   (u16)((hw->mac.mta_shadow[i] >> 16) &
1589 						 0xFFFF));
1590 	}
1591 
1592 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1593 
1594 release:
1595 	hw->phy.ops.release(hw);
1596 }
1597 
1598 /**
1599  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1600  *  @hw: pointer to the HW structure
1601  *
1602  *  Checks if firmware is blocking the reset of the PHY.
1603  *  This is a function pointer entry point only called by
1604  *  reset routines.
1605  **/
1606 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1607 {
1608 	u32 fwsm;
1609 
1610 	DEBUGFUNC("e1000_check_reset_block_ich8lan");
1611 
1612 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
1613 
1614 	return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
1615 						: E1000_BLK_PHY_RESET;
1616 }
1617 
1618 /**
1619  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
1620  *  @hw: pointer to the HW structure
1621  *
1622  *  Assumes semaphore already acquired.
1623  *
1624  **/
1625 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
1626 {
1627 	u16 phy_data;
1628 	u32 strap = E1000_READ_REG(hw, E1000_STRAP);
1629 	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
1630 		E1000_STRAP_SMT_FREQ_SHIFT;
1631 	s32 ret_val;
1632 
1633 	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
1634 
1635 	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
1636 	if (ret_val)
1637 		return ret_val;
1638 
1639 	phy_data &= ~HV_SMB_ADDR_MASK;
1640 	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1641 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1642 
1643 	if (hw->phy.type == e1000_phy_i217) {
1644 		/* Restore SMBus frequency */
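     		/* The raw strap value is biased by one: 0 is treated as
     		 * unsupported, and the post-decrement below converts 1-3 to
     		 * a zero-based code whose two bits are placed in the
     		 * low/high frequency fields.
     		 */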
1645 		if (freq--) {
1646 			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
1647 			phy_data |= (freq & (1 << 0)) <<
1648 				HV_SMB_ADDR_FREQ_LOW_SHIFT;
1649 			phy_data |= (freq & (1 << 1)) <<
1650 				(HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
1651 		} else {
1652 			DEBUGOUT("Unsupported SMB frequency in PHY\n");
1653 		}
1654 	}
1655 
1656 	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
1657 }
1658 
1659 /**
1660  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
1661  *  @hw:   pointer to the HW structure
1662  *
1663  *  SW should configure the LCD from the NVM extended configuration region
1664  *  as a workaround for certain parts.
1665  **/
1666 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1667 {
1668 	struct e1000_phy_info *phy = &hw->phy;
1669 	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
1670 	s32 ret_val = E1000_SUCCESS;
1671 	u16 word_addr, reg_data, reg_addr, phy_page = 0;
1672 
1673 	DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
1674 
1675 	/* Initialize the PHY from the NVM on ICH platforms.  This
1676 	 * is needed due to an issue where the NVM configuration is
1677 	 * not properly autoloaded after power transitions.
1678 	 * Therefore, after each PHY reset, we will load the
1679 	 * configuration data out of the NVM manually.
1680 	 */
1681 	switch (hw->mac.type) {
1682 	case e1000_ich8lan:
1683 		if (phy->type != e1000_phy_igp_3)
1684 			return ret_val;
1685 
1686 		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
1687 		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
1688 			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
1689 			break;
1690 		}
1691 		/* Fall-thru */
1692 	case e1000_pchlan:
1693 	case e1000_pch2lan:
1694 	case e1000_pch_lpt:
1695 		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
1696 		break;
1697 	default:
1698 		return ret_val;
1699 	}
1700 
1701 	ret_val = hw->phy.ops.acquire(hw);
1702 	if (ret_val)
1703 		return ret_val;
1704 
1705 	data = E1000_READ_REG(hw, E1000_FEXTNVM);
1706 	if (!(data & sw_cfg_mask))
1707 		goto release;
1708 
1709 	/* Make sure HW does not configure LCD from PHY
1710 	 * extended configuration before SW configuration
1711 	 */
1712 	data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1713 	if ((hw->mac.type < e1000_pch2lan) &&
1714 	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
1715 		goto release;
1716 
1717 	cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
1718 	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
1719 	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
1720 	if (!cnf_size)
1721 		goto release;
1722 
1723 	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
1724 	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
1725 
1726 	if (((hw->mac.type == e1000_pchlan) &&
1727 	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
1728 	    (hw->mac.type > e1000_pchlan)) {
1729 		/* HW configures the SMBus address and LEDs when the
1730 		 * OEM and LCD Write Enable bits are set in the NVM.
1731 		 * When both NVM bits are cleared, SW will configure
1732 		 * them instead.
1733 		 */
1734 		ret_val = e1000_write_smbus_addr(hw);
1735 		if (ret_val)
1736 			goto release;
1737 
1738 		data = E1000_READ_REG(hw, E1000_LEDCTL);
1739 		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
1740 							(u16)data);
1741 		if (ret_val)
1742 			goto release;
1743 	}
1744 
1745 	/* Configure LCD from extended configuration region. */
1746 
1747 	/* cnf_base_addr is in DWORDs; convert it to an NVM word address. */
1748 	word_addr = (u16)(cnf_base_addr << 1);
1749 
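     	/* Each extended configuration entry is a pair of NVM words: the
     	 * register data followed by the register address.
     	 */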
1750 	for (i = 0; i < cnf_size; i++) {
1751 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
1752 					   &reg_data);
1753 		if (ret_val)
1754 			goto release;
1755 
1756 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
1757 					   1, &reg_addr);
1758 		if (ret_val)
1759 			goto release;
1760 
1761 		/* Save off the PHY page for future writes. */
1762 		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
1763 			phy_page = reg_data;
1764 			continue;
1765 		}
1766 
1767 		reg_addr &= PHY_REG_MASK;
1768 		reg_addr |= phy_page;
1769 
1770 		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
1771 						    reg_data);
1772 		if (ret_val)
1773 			goto release;
1774 	}
1775 
1776 release:
1777 	hw->phy.ops.release(hw);
1778 	return ret_val;
1779 }
1780 
1781 /**
1782  *  e1000_k1_gig_workaround_hv - K1 Si workaround
1783  *  @hw:   pointer to the HW structure
1784  *  @link: link up bool flag
1785  *
1786  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
1787  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
1788  *  If link is down, the function will restore the default K1 setting located
1789  *  in the NVM.
1790  **/
1791 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1792 {
1793 	s32 ret_val = E1000_SUCCESS;
1794 	u16 status_reg = 0;
1795 	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
1796 
1797 	DEBUGFUNC("e1000_k1_gig_workaround_hv");
1798 
1799 	if (hw->mac.type != e1000_pchlan)
1800 		return E1000_SUCCESS;
1801 
1802 	/* Wrap the whole flow with the sw flag */
1803 	ret_val = hw->phy.ops.acquire(hw);
1804 	if (ret_val)
1805 		return ret_val;
1806 
1807 	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
1808 	if (link) {
1809 		if (hw->phy.type == e1000_phy_82578) {
1810 			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
1811 							      &status_reg);
1812 			if (ret_val)
1813 				goto release;
1814 
1815 			status_reg &= BM_CS_STATUS_LINK_UP |
1816 				      BM_CS_STATUS_RESOLVED |
1817 				      BM_CS_STATUS_SPEED_MASK;
1818 
1819 			if (status_reg == (BM_CS_STATUS_LINK_UP |
1820 					   BM_CS_STATUS_RESOLVED |
1821 					   BM_CS_STATUS_SPEED_1000))
1822 				k1_enable = FALSE;
1823 		}
1824 
1825 		if (hw->phy.type == e1000_phy_82577) {
1826 			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
1827 							      &status_reg);
1828 			if (ret_val)
1829 				goto release;
1830 
1831 			status_reg &= HV_M_STATUS_LINK_UP |
1832 				      HV_M_STATUS_AUTONEG_COMPLETE |
1833 				      HV_M_STATUS_SPEED_MASK;
1834 
1835 			if (status_reg == (HV_M_STATUS_LINK_UP |
1836 					   HV_M_STATUS_AUTONEG_COMPLETE |
1837 					   HV_M_STATUS_SPEED_1000))
1838 				k1_enable = FALSE;
1839 		}
1840 
1841 		/* Link stall fix for link up */
1842 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1843 						       0x0100);
1844 		if (ret_val)
1845 			goto release;
1846 
1847 	} else {
1848 		/* Link stall fix for link down */
1849 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1850 						       0x4100);
1851 		if (ret_val)
1852 			goto release;
1853 	}
1854 
1855 	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
1856 
1857 release:
1858 	hw->phy.ops.release(hw);
1859 
1860 	return ret_val;
1861 }
1862 
1863 /**
1864  *  e1000_configure_k1_ich8lan - Configure K1 power state
1865  *  @hw: pointer to the HW structure
1866  *  @k1_enable: K1 state to configure
1867  *
1868  *  Configure the K1 power state based on the provided parameter.
1869  *  Assumes semaphore already acquired.
1870  *
1871  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1872  **/
1873 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1874 {
1875 	s32 ret_val;
1876 	u32 ctrl_reg = 0;
1877 	u32 ctrl_ext = 0;
1878 	u32 reg = 0;
1879 	u16 kmrn_reg = 0;
1880 
1881 	DEBUGFUNC("e1000_configure_k1_ich8lan");
1882 
1883 	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1884 					     &kmrn_reg);
1885 	if (ret_val)
1886 		return ret_val;
1887 
1888 	if (k1_enable)
1889 		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
1890 	else
1891 		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
1892 
1893 	ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1894 					      kmrn_reg);
1895 	if (ret_val)
1896 		return ret_val;
1897 
1898 	usec_delay(20);
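     	/* Briefly force the MAC speed (FRCSPD with the speed bits cleared
     	 * and speed-bypass set) so the K1 change takes effect, then restore
     	 * the original CTRL and CTRL_EXT values.
     	 */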
1899 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1900 	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1901 
1902 	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1903 	reg |= E1000_CTRL_FRCSPD;
1904 	E1000_WRITE_REG(hw, E1000_CTRL, reg);
1905 
1906 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
1907 	E1000_WRITE_FLUSH(hw);
1908 	usec_delay(20);
1909 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1910 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1911 	E1000_WRITE_FLUSH(hw);
1912 	usec_delay(20);
1913 
1914 	return E1000_SUCCESS;
1915 }
1916 
1917 /**
1918  *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
1919  *  @hw:       pointer to the HW structure
1920  *  @d0_state: TRUE if entering D0, FALSE if entering D3
1921  *
1922  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
1923  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
1924  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
1925  **/
1926 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1927 {
1928 	s32 ret_val = 0;
1929 	u32 mac_reg;
1930 	u16 oem_reg;
1931 
1932 	DEBUGFUNC("e1000_oem_bits_config_ich8lan");
1933 
1934 	if (hw->mac.type < e1000_pchlan)
1935 		return ret_val;
1936 
1937 	ret_val = hw->phy.ops.acquire(hw);
1938 	if (ret_val)
1939 		return ret_val;
1940 
1941 	if (hw->mac.type == e1000_pchlan) {
1942 		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1943 		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1944 			goto release;
1945 	}
1946 
1947 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
1948 	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
1949 		goto release;
1950 
1951 	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
1952 
1953 	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
1954 	if (ret_val)
1955 		goto release;
1956 
1957 	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
1958 
1959 	if (d0_state) {
1960 		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
1961 			oem_reg |= HV_OEM_BITS_GBE_DIS;
1962 
1963 		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1964 			oem_reg |= HV_OEM_BITS_LPLU;
1965 	} else {
1966 		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
1967 		    E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
1968 			oem_reg |= HV_OEM_BITS_GBE_DIS;
1969 
1970 		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
1971 		    E1000_PHY_CTRL_NOND0A_LPLU))
1972 			oem_reg |= HV_OEM_BITS_LPLU;
1973 	}
1974 
1975 	/* Set Restart auto-neg to activate the bits */
1976 	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
1977 	    !hw->phy.ops.check_reset_block(hw))
1978 		oem_reg |= HV_OEM_BITS_RESTART_AN;
1979 
1980 	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
1981 
1982 release:
1983 	hw->phy.ops.release(hw);
1984 
1985 	return ret_val;
1986 }
1987 
1989 /**
1990  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
1991  *  @hw:   pointer to the HW structure
1992  **/
1993 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
1994 {
1995 	s32 ret_val;
1996 	u16 data;
1997 
1998 	DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
1999 
2000 	ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2001 	if (ret_val)
2002 		return ret_val;
2003 
2004 	data |= HV_KMRN_MDIO_SLOW;
2005 
2006 	ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2007 
2008 	return ret_val;
2009 }
2010 
2011 /**
2012  *  e1000_hv_phy_workarounds_ich8lan - A series of PHY workarounds to be
2013  *  done after every PHY reset.
2014  **/
2015 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2016 {
2017 	s32 ret_val = E1000_SUCCESS;
2018 	u16 phy_data;
2019 
2020 	DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2021 
2022 	if (hw->mac.type != e1000_pchlan)
2023 		return E1000_SUCCESS;
2024 
2025 	/* Set MDIO slow mode before any other MDIO access */
2026 	if (hw->phy.type == e1000_phy_82577) {
2027 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
2028 		if (ret_val)
2029 			return ret_val;
2030 	}
2031 
2032 	if (((hw->phy.type == e1000_phy_82577) &&
2033 	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2034 	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2035 		/* Disable generation of early preamble */
2036 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2037 		if (ret_val)
2038 			return ret_val;
2039 
2040 		/* Preamble tuning for SSC */
2041 		ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2042 						0xA204);
2043 		if (ret_val)
2044 			return ret_val;
2045 	}
2046 
2047 	if (hw->phy.type == e1000_phy_82578) {
2048 		/* Return registers to default by doing a soft reset then
2049 		 * writing 0x3140 to the control register.
2050 		 */
2051 		if (hw->phy.revision < 2) {
2052 			e1000_phy_sw_reset_generic(hw);
2053 			ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2054 							0x3140);
2055 		}
2056 	}
2057 
2058 	/* Select page 0 */
2059 	ret_val = hw->phy.ops.acquire(hw);
2060 	if (ret_val)
2061 		return ret_val;
2062 
2063 	hw->phy.addr = 1;
2064 	ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2065 	hw->phy.ops.release(hw);
2066 	if (ret_val)
2067 		return ret_val;
2068 
2069 	/* Configure the K1 Si workaround during phy reset assuming there is
2070 	 * link so that it disables K1 if link is in 1Gbps.
2071 	 */
2072 	ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
2073 	if (ret_val)
2074 		return ret_val;
2075 
2076 	/* Workaround for link disconnects on a busy hub in half duplex */
2077 	ret_val = hw->phy.ops.acquire(hw);
2078 	if (ret_val)
2079 		return ret_val;
2080 	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2081 	if (ret_val)
2082 		goto release;
2083 	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2084 					       phy_data & 0x00FF);
2085 	if (ret_val)
2086 		goto release;
2087 
2088 	/* Raise the MSE threshold so the link stays up when noise is high */
2089 	ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2090 release:
2091 	hw->phy.ops.release(hw);
2092 
2093 	return ret_val;
2094 }
2095 
2096 /**
2097  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2098  *  @hw:   pointer to the HW structure
2099  **/
2100 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2101 {
2102 	u32 mac_reg;
2103 	u16 i, phy_reg = 0;
2104 	s32 ret_val;
2105 
2106 	DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2107 
2108 	ret_val = hw->phy.ops.acquire(hw);
2109 	if (ret_val)
2110 		return;
2111 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2112 	if (ret_val)
2113 		goto release;
2114 
2115 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
2116 	for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
2117 		mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2118 		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2119 					   (u16)(mac_reg & 0xFFFF));
2120 		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2121 					   (u16)((mac_reg >> 16) & 0xFFFF));
2122 
2123 		mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2124 		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2125 					   (u16)(mac_reg & 0xFFFF));
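     		/* The address-valid bit (RAH bit 31) lands in bit 15 of the
     		 * PHY's RAR control word after the shift below.
     		 */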
2126 		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2127 					   (u16)((mac_reg & E1000_RAH_AV)
2128 						 >> 16));
2129 	}
2130 
2131 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2132 
2133 release:
2134 	hw->phy.ops.release(hw);
2135 }
2136 
2137 static u32 e1000_calc_rx_da_crc(u8 mac[])
2138 {
2139 	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
2140 	u32 i, j, mask, crc;
2141 
2142 	DEBUGFUNC("e1000_calc_rx_da_crc");
2143 
2144 	crc = 0xffffffff;
2145 	for (i = 0; i < 6; i++) {
2146 		crc = crc ^ mac[i];
2147 		for (j = 8; j > 0; j--) {
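     			/* (crc & 1) * (-1) yields an all-ones mask when the
     			 * LSB is set, so the reflected polynomial is XORed
     			 * in; otherwise only the shift happens.  This is a
     			 * branchless bitwise CRC-32.
     			 */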
2148 			mask = (crc & 1) * (-1);
2149 			crc = (crc >> 1) ^ (poly & mask);
2150 		}
2151 	}
2152 	return ~crc;
2153 }
2154 
2155 /**
2156  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2157  *  with 82579 PHY
2158  *  @hw: pointer to the HW structure
2159  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2160  **/
2161 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2162 {
2163 	s32 ret_val = E1000_SUCCESS;
2164 	u16 phy_reg, data;
2165 	u32 mac_reg;
2166 	u16 i;
2167 
2168 	DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2169 
2170 	if (hw->mac.type < e1000_pch2lan)
2171 		return E1000_SUCCESS;
2172 
2173 	/* disable Rx path while enabling/disabling workaround */
2174 	hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2175 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2176 					phy_reg | (1 << 14));
2177 	if (ret_val)
2178 		return ret_val;
2179 
2180 	if (enable) {
2181 		/* Write Rx addresses (rar_entry_count for RAL/H, +4 for
2182 		 * SHRAL/H) and initial CRC values to the MAC
2183 		 */
2184 		for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
2185 			u8 mac_addr[ETH_ADDR_LEN] = {0};
2186 			u32 addr_high, addr_low;
2187 
2188 			addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2189 			if (!(addr_high & E1000_RAH_AV))
2190 				continue;
2191 			addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2192 			mac_addr[0] = (addr_low & 0xFF);
2193 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
2194 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
2195 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
2196 			mac_addr[4] = (addr_high & 0xFF);
2197 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
2198 
2199 			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2200 					e1000_calc_rx_da_crc(mac_addr));
2201 		}
2202 
2203 		/* Write Rx addresses to the PHY */
2204 		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2205 
2206 		/* Enable jumbo frame workaround in the MAC */
2207 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2208 		mac_reg &= ~(1 << 14);
2209 		mac_reg |= (7 << 15);
2210 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2211 
2212 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2213 		mac_reg |= E1000_RCTL_SECRC;
2214 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2215 
2216 		ret_val = e1000_read_kmrn_reg_generic(hw,
2217 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2218 						&data);
2219 		if (ret_val)
2220 			return ret_val;
2221 		ret_val = e1000_write_kmrn_reg_generic(hw,
2222 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2223 						data | (1 << 0));
2224 		if (ret_val)
2225 			return ret_val;
2226 		ret_val = e1000_read_kmrn_reg_generic(hw,
2227 						E1000_KMRNCTRLSTA_HD_CTRL,
2228 						&data);
2229 		if (ret_val)
2230 			return ret_val;
2231 		data &= ~(0xF << 8);
2232 		data |= (0xB << 8);
2233 		ret_val = e1000_write_kmrn_reg_generic(hw,
2234 						E1000_KMRNCTRLSTA_HD_CTRL,
2235 						data);
2236 		if (ret_val)
2237 			return ret_val;
2238 
2239 		/* Enable jumbo frame workaround in the PHY */
2240 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2241 		data &= ~(0x7F << 5);
2242 		data |= (0x37 << 5);
2243 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2244 		if (ret_val)
2245 			return ret_val;
2246 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2247 		data &= ~(1 << 13);
2248 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2249 		if (ret_val)
2250 			return ret_val;
2251 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2252 		data &= ~(0x3FF << 2);
2253 		data |= (0x1A << 2);
2254 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2255 		if (ret_val)
2256 			return ret_val;
2257 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2258 		if (ret_val)
2259 			return ret_val;
2260 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2261 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2262 						(1 << 10));
2263 		if (ret_val)
2264 			return ret_val;
2265 	} else {
2266 		/* Write MAC register values back to h/w defaults */
2267 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2268 		mac_reg &= ~(0xF << 14);
2269 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2270 
2271 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2272 		mac_reg &= ~E1000_RCTL_SECRC;
2273 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2274 
2275 		ret_val = e1000_read_kmrn_reg_generic(hw,
2276 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2277 						&data);
2278 		if (ret_val)
2279 			return ret_val;
2280 		ret_val = e1000_write_kmrn_reg_generic(hw,
2281 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2282 						data & ~(1 << 0));
2283 		if (ret_val)
2284 			return ret_val;
2285 		ret_val = e1000_read_kmrn_reg_generic(hw,
2286 						E1000_KMRNCTRLSTA_HD_CTRL,
2287 						&data);
2288 		if (ret_val)
2289 			return ret_val;
2290 		data &= ~(0xF << 8);
2291 		data |= (0xB << 8);
2292 		ret_val = e1000_write_kmrn_reg_generic(hw,
2293 						E1000_KMRNCTRLSTA_HD_CTRL,
2294 						data);
2295 		if (ret_val)
2296 			return ret_val;
2297 
2298 		/* Write PHY register values back to h/w defaults */
2299 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2300 		data &= ~(0x7F << 5);
2301 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2302 		if (ret_val)
2303 			return ret_val;
2304 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2305 		data |= (1 << 13);
2306 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2307 		if (ret_val)
2308 			return ret_val;
2309 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2310 		data &= ~(0x3FF << 2);
2311 		data |= (0x8 << 2);
2312 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2313 		if (ret_val)
2314 			return ret_val;
2315 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2316 		if (ret_val)
2317 			return ret_val;
2318 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2319 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2320 						~(1 << 10));
2321 		if (ret_val)
2322 			return ret_val;
2323 	}
2324 
2325 	/* re-enable Rx path after enabling/disabling workaround */
2326 	return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2327 				     ~(1 << 14));
2328 }
2329 
2330 /**
2331  *  e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2332  *  e1000_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
2333  **/
2334 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2335 {
2336 	s32 ret_val = E1000_SUCCESS;
2337 
2338 	DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2339 
2340 	if (hw->mac.type != e1000_pch2lan)
2341 		return E1000_SUCCESS;
2342 
2343 	/* Set MDIO slow mode before any other MDIO access */
2344 	ret_val = e1000_set_mdio_slow_mode_hv(hw);
2345 	if (ret_val)
2346 		return ret_val;
2347 
2348 	ret_val = hw->phy.ops.acquire(hw);
2349 	if (ret_val)
2350 		return ret_val;
2351 	/* Raise the MSE threshold so the link stays up when noise is high */
2352 	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2353 	if (ret_val)
2354 		goto release;
2355 	/* drop the link after the MSE threshold has been exceeded 5 times */
2356 	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2357 release:
2358 	hw->phy.ops.release(hw);
2359 
2360 	return ret_val;
2361 }
2362 
2363 /**
2364  *  e1000_k1_workaround_lv - K1 Si workaround
2365  *  @hw:   pointer to the HW structure
2366  *
2367  *  Workaround to set the K1 beacon duration for 82579 parts
2368  **/
2369 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2370 {
2371 	s32 ret_val = E1000_SUCCESS;
2372 	u16 status_reg = 0;
2373 	u32 mac_reg;
2374 	u16 phy_reg;
2375 
2376 	DEBUGFUNC("e1000_k1_workaround_lv");
2377 
2378 	if (hw->mac.type != e1000_pch2lan)
2379 		return E1000_SUCCESS;
2380 
2381 	/* Set the K1 beacon duration based on whether the link speed is 1Gbps */
2382 	ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2383 	if (ret_val)
2384 		return ret_val;
2385 
2386 	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2387 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2388 		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
2389 		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2390 
2391 		ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
2392 		if (ret_val)
2393 			return ret_val;
2394 
2395 		if (status_reg & HV_M_STATUS_SPEED_1000) {
2396 			u16 pm_phy_reg;
2397 
2398 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
2399 			phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2400 			/* LV 1G packet drop issue workaround */
2401 			ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2402 						       &pm_phy_reg);
2403 			if (ret_val)
2404 				return ret_val;
2405 			pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA;
2406 			ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
2407 							pm_phy_reg);
2408 			if (ret_val)
2409 				return ret_val;
2410 		} else {
2411 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2412 			phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2413 		}
2414 		E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2415 		ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
2416 	}
2417 
2418 	return ret_val;
2419 }
2420 
2421 /**
2422  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2423  *  @hw:   pointer to the HW structure
2424  *  @gate: boolean set to TRUE to gate, FALSE to ungate
2425  *
2426  *  Gate/ungate the automatic PHY configuration via hardware; perform
2427  *  the configuration via software instead.
2428  **/
2429 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2430 {
2431 	u32 extcnf_ctrl;
2432 
2433 	DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
2434 
2435 	if (hw->mac.type < e1000_pch2lan)
2436 		return;
2437 
2438 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2439 
2440 	if (gate)
2441 		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2442 	else
2443 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2444 
2445 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
2446 }
2447 
2448 /**
2449  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
2450  *  @hw: pointer to the HW structure
2451  *
2452  *  Check the appropriate indication that the MAC has finished configuring the
2453  *  PHY after a software reset.
2454  **/
2455 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2456 {
2457 	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2458 
2459 	DEBUGFUNC("e1000_lan_init_done_ich8lan");
2460 
2461 	/* Wait for basic configuration to complete before proceeding */
2462 	do {
2463 		data = E1000_READ_REG(hw, E1000_STATUS);
2464 		data &= E1000_STATUS_LAN_INIT_DONE;
2465 		usec_delay(100);
2466 	} while ((!data) && --loop);
2467 
2468 	/* If basic configuration is incomplete before the above loop
2469 	 * count reaches 0, loading the configuration from NVM will
2470 	 * leave the PHY in a bad state possibly resulting in no link.
2471 	 */
2472 	if (loop == 0)
2473 		DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
2474 
2475 	/* Clear the Init Done bit for the next init event */
2476 	data = E1000_READ_REG(hw, E1000_STATUS);
2477 	data &= ~E1000_STATUS_LAN_INIT_DONE;
2478 	E1000_WRITE_REG(hw, E1000_STATUS, data);
2479 }
2480 
2481 /**
2482  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2483  *  @hw: pointer to the HW structure
2484  **/
2485 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2486 {
2487 	s32 ret_val = E1000_SUCCESS;
2488 	u16 reg;
2489 
2490 	DEBUGFUNC("e1000_post_phy_reset_ich8lan");
2491 
2492 	if (hw->phy.ops.check_reset_block(hw))
2493 		return E1000_SUCCESS;
2494 
2495 	/* Allow time for h/w to get to quiescent state after reset */
2496 	msec_delay(10);
2497 
2498 	/* Perform any necessary post-reset workarounds */
2499 	switch (hw->mac.type) {
2500 	case e1000_pchlan:
2501 		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2502 		if (ret_val)
2503 			return ret_val;
2504 		break;
2505 	case e1000_pch2lan:
2506 		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2507 		if (ret_val)
2508 			return ret_val;
2509 		break;
2510 	default:
2511 		break;
2512 	}
2513 
2514 	/* Clear the host wakeup bit after lcd reset */
2515 	if (hw->mac.type >= e1000_pchlan) {
2516 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
2517 		reg &= ~BM_WUC_HOST_WU_BIT;
2518 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
2519 	}
2520 
2521 	/* Configure the LCD with the extended configuration region in NVM */
2522 	ret_val = e1000_sw_lcd_config_ich8lan(hw);
2523 	if (ret_val)
2524 		return ret_val;
2525 
2526 	/* Configure the LCD with the OEM bits in NVM */
2527 	ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
2528 
2529 	if (hw->mac.type == e1000_pch2lan) {
2530 		/* Ungate automatic PHY configuration on non-managed 82579 */
2531 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
2532 		    E1000_ICH_FWSM_FW_VALID)) {
2533 			msec_delay(10);
2534 			e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
2535 		}
2536 
2537 		/* Set EEE LPI Update Timer to 200usec */
2538 		ret_val = hw->phy.ops.acquire(hw);
2539 		if (ret_val)
2540 			return ret_val;
2541 		ret_val = e1000_write_emi_reg_locked(hw,
2542 						     I82579_LPI_UPDATE_TIMER,
2543 						     0x1387);
2544 		hw->phy.ops.release(hw);
2545 	}
2546 
2547 	return ret_val;
2548 }
2549 
2550 /**
2551  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2552  *  @hw: pointer to the HW structure
2553  *
2554  *  Resets the PHY
2555  *  This is a function pointer entry point called by drivers
2556  *  or other shared routines.
2557  **/
2558 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2559 {
2560 	s32 ret_val = E1000_SUCCESS;
2561 
2562 	DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
2563 
2564 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
2565 	if ((hw->mac.type == e1000_pch2lan) &&
2566 	    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
2567 		e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
2568 
2569 	ret_val = e1000_phy_hw_reset_generic(hw);
2570 	if (ret_val)
2571 		return ret_val;
2572 
2573 	return e1000_post_phy_reset_ich8lan(hw);
2574 }
2575 
2576 /**
2577  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2578  *  @hw: pointer to the HW structure
2579  *  @active: TRUE to enable LPLU, FALSE to disable
2580  *
2581  *  Sets the LPLU state according to the active flag.  For PCH, if the OEM
2582  *  write bits are disabled in the NVM, writing the LPLU bits in the MAC will
2583  *  not set the PHY speed. This function will manually set the LPLU bit and
2584  *  restart auto-neg as hw would do. D3 and D0 LPLU will call the same function
2585  *  since it configures the same bit.
2586  **/
2587 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2588 {
2589 	s32 ret_val;
2590 	u16 oem_reg;
2591 
2592 	DEBUGFUNC("e1000_set_lplu_state_pchlan");
2593 
2594 	ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
2595 	if (ret_val)
2596 		return ret_val;
2597 
2598 	if (active)
2599 		oem_reg |= HV_OEM_BITS_LPLU;
2600 	else
2601 		oem_reg &= ~HV_OEM_BITS_LPLU;
2602 
2603 	if (!hw->phy.ops.check_reset_block(hw))
2604 		oem_reg |= HV_OEM_BITS_RESTART_AN;
2605 
2606 	return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
2607 }
2608 
2609 /**
2610  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2611  *  @hw: pointer to the HW structure
2612  *  @active: TRUE to enable LPLU, FALSE to disable
2613  *
2614  *  Sets the LPLU D0 state according to the active flag.  When
2615  *  activating LPLU this function also disables smart speed
2616  *  and vice versa.  LPLU will not be activated unless the
2617  *  device autonegotiation advertisement meets standards of
2618  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
2619  *  This is a function pointer entry point only called by
2620  *  PHY setup routines.
2621  **/
2622 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2623 {
2624 	struct e1000_phy_info *phy = &hw->phy;
2625 	u32 phy_ctrl;
2626 	s32 ret_val = E1000_SUCCESS;
2627 	u16 data;
2628 
2629 	DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
2630 
2631 	if (phy->type == e1000_phy_ife)
2632 		return E1000_SUCCESS;
2633 
2634 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2635 
2636 	if (active) {
2637 		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2638 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2639 
2640 		if (phy->type != e1000_phy_igp_3)
2641 			return E1000_SUCCESS;
2642 
2643 		/* Call gig speed drop workaround on LPLU before accessing
2644 		 * any PHY registers
2645 		 */
2646 		if (hw->mac.type == e1000_ich8lan)
2647 			e1000_gig_downshift_workaround_ich8lan(hw);
2648 
2649 		/* When LPLU is enabled, we should disable SmartSpeed */
2650 		ret_val = phy->ops.read_reg(hw,
2651 					    IGP01E1000_PHY_PORT_CONFIG,
2652 					    &data);
2653 		if (ret_val)
2654 			return ret_val;
2655 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2656 		ret_val = phy->ops.write_reg(hw,
2657 					     IGP01E1000_PHY_PORT_CONFIG,
2658 					     data);
2659 		if (ret_val)
2660 			return ret_val;
2661 	} else {
2662 		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2663 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2664 
2665 		if (phy->type != e1000_phy_igp_3)
2666 			return E1000_SUCCESS;
2667 
2668 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
2669 		 * during Dx states where the power conservation is most
2670 		 * important.  During driver activity we should enable
2671 		 * SmartSpeed, so performance is maintained.
2672 		 */
2673 		if (phy->smart_speed == e1000_smart_speed_on) {
2674 			ret_val = phy->ops.read_reg(hw,
2675 						    IGP01E1000_PHY_PORT_CONFIG,
2676 						    &data);
2677 			if (ret_val)
2678 				return ret_val;
2679 
2680 			data |= IGP01E1000_PSCFR_SMART_SPEED;
2681 			ret_val = phy->ops.write_reg(hw,
2682 						     IGP01E1000_PHY_PORT_CONFIG,
2683 						     data);
2684 			if (ret_val)
2685 				return ret_val;
2686 		} else if (phy->smart_speed == e1000_smart_speed_off) {
2687 			ret_val = phy->ops.read_reg(hw,
2688 						    IGP01E1000_PHY_PORT_CONFIG,
2689 						    &data);
2690 			if (ret_val)
2691 				return ret_val;
2692 
2693 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2694 			ret_val = phy->ops.write_reg(hw,
2695 						     IGP01E1000_PHY_PORT_CONFIG,
2696 						     data);
2697 			if (ret_val)
2698 				return ret_val;
2699 		}
2700 	}
2701 
2702 	return E1000_SUCCESS;
2703 }
2704 
2705 /**
2706  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
2707  *  @hw: pointer to the HW structure
2708  *  @active: TRUE to enable LPLU, FALSE to disable
2709  *
2710  *  Sets the LPLU D3 state according to the active flag.  When
2711  *  activating LPLU this function also disables smart speed
2712  *  and vice versa.  LPLU will not be activated unless the
2713  *  device autonegotiation advertisement meets standards of
2714  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
2715  *  This is a function pointer entry point only called by
2716  *  PHY setup routines.
2717  **/
2718 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2719 {
2720 	struct e1000_phy_info *phy = &hw->phy;
2721 	u32 phy_ctrl;
2722 	s32 ret_val = E1000_SUCCESS;
2723 	u16 data;
2724 
2725 	DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
2726 
2727 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2728 
2729 	if (!active) {
2730 		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
2731 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2732 
2733 		if (phy->type != e1000_phy_igp_3)
2734 			return E1000_SUCCESS;
2735 
2736 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
2737 		 * during Dx states where the power conservation is most
2738 		 * important.  During driver activity we should enable
2739 		 * SmartSpeed, so performance is maintained.
2740 		 */
2741 		if (phy->smart_speed == e1000_smart_speed_on) {
2742 			ret_val = phy->ops.read_reg(hw,
2743 						    IGP01E1000_PHY_PORT_CONFIG,
2744 						    &data);
2745 			if (ret_val)
2746 				return ret_val;
2747 
2748 			data |= IGP01E1000_PSCFR_SMART_SPEED;
2749 			ret_val = phy->ops.write_reg(hw,
2750 						     IGP01E1000_PHY_PORT_CONFIG,
2751 						     data);
2752 			if (ret_val)
2753 				return ret_val;
2754 		} else if (phy->smart_speed == e1000_smart_speed_off) {
2755 			ret_val = phy->ops.read_reg(hw,
2756 						    IGP01E1000_PHY_PORT_CONFIG,
2757 						    &data);
2758 			if (ret_val)
2759 				return ret_val;
2760 
2761 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2762 			ret_val = phy->ops.write_reg(hw,
2763 						     IGP01E1000_PHY_PORT_CONFIG,
2764 						     data);
2765 			if (ret_val)
2766 				return ret_val;
2767 		}
2768 	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
2769 		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
2770 		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
2771 		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
2772 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2773 
2774 		if (phy->type != e1000_phy_igp_3)
2775 			return E1000_SUCCESS;
2776 
2777 		/* Call gig speed drop workaround on LPLU before accessing
2778 		 * any PHY registers
2779 		 */
2780 		if (hw->mac.type == e1000_ich8lan)
2781 			e1000_gig_downshift_workaround_ich8lan(hw);
2782 
2783 		/* When LPLU is enabled, we should disable SmartSpeed */
2784 		ret_val = phy->ops.read_reg(hw,
2785 					    IGP01E1000_PHY_PORT_CONFIG,
2786 					    &data);
2787 		if (ret_val)
2788 			return ret_val;
2789 
2790 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2791 		ret_val = phy->ops.write_reg(hw,
2792 					     IGP01E1000_PHY_PORT_CONFIG,
2793 					     data);
2794 	}
2795 
2796 	return ret_val;
2797 }
2798 
2799 /**
2800  *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
2801  *  @hw: pointer to the HW structure
2802  *  @bank:  pointer to the variable that returns the active bank
2803  *
2804  *  Reads signature byte from the NVM using the flash access registers.
2805  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
2806  **/
2807 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
2808 {
2809 	u32 eecd;
2810 	struct e1000_nvm_info *nvm = &hw->nvm;
2811 	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
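     	/* act_offset is the byte address of the upper byte of the
     	 * signature word (0x13), which carries the valid-bank bits 15:14.
     	 */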
2812 	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
2813 	u8 sig_byte = 0;
2814 	s32 ret_val;
2815 
2816 	DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
2817 
2818 	switch (hw->mac.type) {
2819 	case e1000_ich8lan:
2820 	case e1000_ich9lan:
2821 		eecd = E1000_READ_REG(hw, E1000_EECD);
2822 		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
2823 		    E1000_EECD_SEC1VAL_VALID_MASK) {
2824 			if (eecd & E1000_EECD_SEC1VAL)
2825 				*bank = 1;
2826 			else
2827 				*bank = 0;
2828 
2829 			return E1000_SUCCESS;
2830 		}
2831 		DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
2832 		/* fall-thru */
2833 	default:
2834 		/* set bank to 0 in case flash read fails */
2835 		*bank = 0;
2836 
2837 		/* Check bank 0 */
2838 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
2839 							&sig_byte);
2840 		if (ret_val)
2841 			return ret_val;
2842 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2843 		    E1000_ICH_NVM_SIG_VALUE) {
2844 			*bank = 0;
2845 			return E1000_SUCCESS;
2846 		}
2847 
2848 		/* Check bank 1 */
2849 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
2850 							bank1_offset,
2851 							&sig_byte);
2852 		if (ret_val)
2853 			return ret_val;
2854 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2855 		    E1000_ICH_NVM_SIG_VALUE) {
2856 			*bank = 1;
2857 			return E1000_SUCCESS;
2858 		}
2859 
2860 		DEBUGOUT("ERROR: No valid NVM bank present\n");
2861 		return -E1000_ERR_NVM;
2862 	}
2863 }
2864 
2865 /**
2866  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
2867  *  @hw: pointer to the HW structure
2868  *  @offset: The offset (in words) of the word(s) to read.
2869  *  @words: Size of data to read in words
2870  *  @data: Pointer to the word(s) to read at offset.
2871  *
2872  *  Reads a word(s) from the NVM using the flash access registers.
2873  **/
2874 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2875 				  u16 *data)
2876 {
2877 	struct e1000_nvm_info *nvm = &hw->nvm;
2878 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2879 	u32 act_offset;
2880 	s32 ret_val = E1000_SUCCESS;
2881 	u32 bank = 0;
2882 	u16 i, word;
2883 
2884 	DEBUGFUNC("e1000_read_nvm_ich8lan");
2885 
2886 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2887 	    (words == 0)) {
2888 		DEBUGOUT("nvm parameter(s) out of bounds\n");
2889 		ret_val = -E1000_ERR_NVM;
2890 		goto out;
2891 	}
2892 
2893 	nvm->ops.acquire(hw);
2894 
2895 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2896 	if (ret_val != E1000_SUCCESS) {
2897 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
2898 		bank = 0;
2899 	}
2900 
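     	/* Both flash_bank_size and the caller's offset are in words, so
     	 * act_offset remains a word index into the selected bank.
     	 */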
2901 	act_offset = (bank) ? nvm->flash_bank_size : 0;
2902 	act_offset += offset;
2903 
2904 	ret_val = E1000_SUCCESS;
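     	/* Words staged in the shadow RAM by e1000_write_nvm_ich8lan() take
     	 * precedence over the flash contents.
     	 */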
2905 	for (i = 0; i < words; i++) {
2906 		if (dev_spec->shadow_ram[offset+i].modified) {
2907 			data[i] = dev_spec->shadow_ram[offset+i].value;
2908 		} else {
2909 			ret_val = e1000_read_flash_word_ich8lan(hw,
2910 								act_offset + i,
2911 								&word);
2912 			if (ret_val)
2913 				break;
2914 			data[i] = word;
2915 		}
2916 	}
2917 
2918 	nvm->ops.release(hw);
2919 
2920 out:
2921 	if (ret_val)
2922 		DEBUGOUT1("NVM read error: %d\n", ret_val);
2923 
2924 	return ret_val;
2925 }
2926 
2927 /**
2928  *  e1000_flash_cycle_init_ich8lan - Initialize flash
2929  *  @hw: pointer to the HW structure
2930  *
2931  *  This function does initial flash setup so that a new read/write/erase cycle
2932  *  can be started.
2933  **/
2934 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2935 {
2936 	union ich8_hws_flash_status hsfsts;
2937 	s32 ret_val = -E1000_ERR_NVM;
2938 
2939 	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
2940 
2941 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2942 
2943 	/* Check if the flash descriptor is valid */
2944 	if (!hsfsts.hsf_status.fldesvalid) {
2945 		DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
2946 		return -E1000_ERR_NVM;
2947 	}
2948 
2949 	/* Clear FCERR and DAEL in hw status by writing 1 */
2950 	hsfsts.hsf_status.flcerr = 1;
2951 	hsfsts.hsf_status.dael = 1;
2952 
2953 	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2954 
2955 	/* Either the flash-cycle-in-progress bit (FLCINPROG) must be
2956 	 * checked before starting a new cycle, or the FDONE bit must
2957 	 * read 1 after a hardware reset; either one indicates whether
2958 	 * a cycle is in progress or has completed.
2959 	 */
2962 
2963 	if (!hsfsts.hsf_status.flcinprog) {
2964 		/* There is no cycle running at present,
2965 		 * so we can start a cycle.
2966 		 * Begin by setting Flash Cycle Done.
2967 		 */
2968 		hsfsts.hsf_status.flcdone = 1;
2969 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2970 		ret_val = E1000_SUCCESS;
2971 	} else {
2972 		s32 i;
2973 
2974 		/* Otherwise poll for some time so the current
2975 		 * cycle has a chance to end before giving up.
2976 		 */
2977 		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
2978 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2979 							      ICH_FLASH_HSFSTS);
2980 			if (!hsfsts.hsf_status.flcinprog) {
2981 				ret_val = E1000_SUCCESS;
2982 				break;
2983 			}
2984 			usec_delay(1);
2985 		}
2986 		if (ret_val == E1000_SUCCESS) {
2987 			/* The previous cycle finished before the timeout;
2988 			 * now set the Flash Cycle Done.
2989 			 */
2990 			hsfsts.hsf_status.flcdone = 1;
2991 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
2992 						hsfsts.regval);
2993 		} else {
2994 			DEBUGOUT("Flash controller busy, cannot get access\n");
2995 		}
2996 	}
2997 
2998 	return ret_val;
2999 }
3000 
3001 /**
3002  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3003  *  @hw: pointer to the HW structure
3004  *  @timeout: maximum time to wait for completion
3005  *
3006  *  This function starts a flash cycle and waits for its completion.
3007  **/
3008 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3009 {
3010 	union ich8_hws_flash_ctrl hsflctl;
3011 	union ich8_hws_flash_status hsfsts;
3012 	u32 i = 0;
3013 
3014 	DEBUGFUNC("e1000_flash_cycle_ich8lan");
3015 
3016 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3017 	hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3018 	hsflctl.hsf_ctrl.flcgo = 1;
3019 	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3020 
3021 	/* Poll until the FDONE bit is set or the timeout expires */
3022 	do {
3023 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3024 		if (hsfsts.hsf_status.flcdone)
3025 			break;
3026 		usec_delay(1);
3027 	} while (i++ < timeout);
3028 
3029 	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3030 		return E1000_SUCCESS;
3031 
3032 	return -E1000_ERR_NVM;
3033 }
3034 
3035 /**
3036  *  e1000_read_flash_word_ich8lan - Read word from flash
3037  *  @hw: pointer to the HW structure
3038  *  @offset: offset to data location
3039  *  @data: pointer to the location for storing the data
3040  *
3041  *  Reads the flash word at offset into data.  Offset is converted
3042  *  to bytes before read.
3043  **/
3044 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3045 					 u16 *data)
3046 {
3047 	DEBUGFUNC("e1000_read_flash_word_ich8lan");
3048 
3049 	if (!data)
3050 		return -E1000_ERR_NVM;
3051 
3052 	/* Must convert offset into bytes. */
3053 	offset <<= 1;
3054 
3055 	return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3056 }
3057 
3058 /**
3059  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3060  *  @hw: pointer to the HW structure
3061  *  @offset: The offset of the byte to read.
3062  *  @data: Pointer to a byte to store the value read.
3063  *
3064  *  Reads a single byte from the NVM using the flash access registers.
3065  **/
3066 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3067 					 u8 *data)
3068 {
3069 	s32 ret_val;
3070 	u16 word = 0;
3071 
3072 	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3073 	if (ret_val)
3074 		return ret_val;
3075 
3076 	*data = (u8)word;
3077 
3078 	return E1000_SUCCESS;
3079 }
3080 
3081 /**
3082  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3083  *  @hw: pointer to the HW structure
3084  *  @offset: The offset (in bytes) of the byte or word to read.
3085  *  @size: Size of data to read, 1=byte 2=word
3086  *  @data: Pointer to the word to store the value read.
3087  *
3088  *  Reads a byte or word from the NVM using the flash access registers.
3089  **/
3090 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3091 					 u8 size, u16 *data)
3092 {
3093 	union ich8_hws_flash_status hsfsts;
3094 	union ich8_hws_flash_ctrl hsflctl;
3095 	u32 flash_linear_addr;
3096 	u32 flash_data = 0;
3097 	s32 ret_val = -E1000_ERR_NVM;
3098 	u8 count = 0;
3099 
3100 	DEBUGFUNC("e1000_read_flash_data_ich8lan");
3101 
3102 	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3103 		return -E1000_ERR_NVM;
3104 
3105 	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3106 			    hw->nvm.flash_base_addr;
3107 
3108 	do {
3109 		usec_delay(1);
3110 		/* Steps */
3111 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3112 		if (ret_val != E1000_SUCCESS)
3113 			break;
3114 
3115 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3116 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3117 		hsflctl.hsf_ctrl.fldbcount = size - 1;
3118 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3119 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3120 
3121 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3122 
3123 		ret_val = e1000_flash_cycle_ich8lan(hw,
3124 						ICH_FLASH_READ_COMMAND_TIMEOUT);
3125 
3126 		/* If FCERR is set, clear it and retry the whole sequence
3127 		 * a few more times; otherwise read the data out of Flash
3128 		 * Data0, least significant byte first.
3129 		 */
3131 		if (ret_val == E1000_SUCCESS) {
3132 			flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3133 			if (size == 1)
3134 				*data = (u8)(flash_data & 0x000000FF);
3135 			else if (size == 2)
3136 				*data = (u16)(flash_data & 0x0000FFFF);
3137 			break;
3138 		} else {
3139 			/* If we've gotten here, then things are probably
3140 			 * completely hosed, but if the error condition is
3141 			 * detected, it won't hurt to give it another try...
3142 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3143 			 */
3144 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3145 							      ICH_FLASH_HSFSTS);
3146 			if (hsfsts.hsf_status.flcerr) {
3147 				/* Repeat for some time before giving up. */
3148 				continue;
3149 			} else if (!hsfsts.hsf_status.flcdone) {
3150 				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3151 				break;
3152 			}
3153 		}
3154 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3155 
3156 	return ret_val;
3157 }
3158 
3159 /**
3160  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3161  *  @hw: pointer to the HW structure
3162  *  @offset: The offset (in words) of the word(s) to write.
3163  *  @words: Size of data to write in words
3164  *  @data: Pointer to the word(s) to write at offset.
3165  *
3166  *  Stages the word(s) in the shadow RAM rather than writing the flash directly.
3167  **/
3168 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3169 				   u16 *data)
3170 {
3171 	struct e1000_nvm_info *nvm = &hw->nvm;
3172 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3173 	u16 i;
3174 
3175 	DEBUGFUNC("e1000_write_nvm_ich8lan");
3176 
3177 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3178 	    (words == 0)) {
3179 		DEBUGOUT("nvm parameter(s) out of bounds\n");
3180 		return -E1000_ERR_NVM;
3181 	}
3182 
3183 	nvm->ops.acquire(hw);
3184 
3185 	for (i = 0; i < words; i++) {
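     	/* Only stage the words in the shadow RAM here; they are committed
     	 * to the flash by e1000_update_nvm_checksum_ich8lan().
     	 */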
3186 		dev_spec->shadow_ram[offset+i].modified = TRUE;
3187 		dev_spec->shadow_ram[offset+i].value = data[i];
3188 	}
3189 
3190 	nvm->ops.release(hw);
3191 
3192 	return E1000_SUCCESS;
3193 }
3194 
3195 /**
3196  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
3197  *  @hw: pointer to the HW structure
3198  *
3199  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
3200  *  which writes the checksum to the shadow ram.  The changes in the shadow
3201  *  ram are then committed to the EEPROM by processing each bank at a time
3202  *  checking for the modified bit and writing only the pending changes.
3203  *  After a successful commit, the shadow ram is cleared and is ready for
3204  *  future writes.
3205  **/
3206 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
3207 {
3208 	struct e1000_nvm_info *nvm = &hw->nvm;
3209 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3210 	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3211 	s32 ret_val;
3212 	u16 data;
3213 
3214 	DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
3215 
3216 	ret_val = e1000_update_nvm_checksum_generic(hw);
3217 	if (ret_val)
3218 		goto out;
3219 
3220 	if (nvm->type != e1000_nvm_flash_sw)
3221 		goto out;
3222 
3223 	nvm->ops.acquire(hw);
3224 
3225 	/* We're writing to the opposite bank so if we're on bank 1,
3226 	 * write to bank 0 etc.  We also need to erase the segment that
3227 	 * is going to be written
3228 	 */
3229 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3230 	if (ret_val != E1000_SUCCESS) {
3231 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3232 		bank = 0;
3233 	}
3234 
3235 	if (bank == 0) {
3236 		new_bank_offset = nvm->flash_bank_size;
3237 		old_bank_offset = 0;
3238 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3239 		if (ret_val)
3240 			goto release;
3241 	} else {
3242 		old_bank_offset = nvm->flash_bank_size;
3243 		new_bank_offset = 0;
3244 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3245 		if (ret_val)
3246 			goto release;
3247 	}
3248 
3249 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3250 		/* Determine whether to write the value stored
3251 		 * in the other NVM bank or a modified value stored
3252 		 * in the shadow RAM
3253 		 */
3254 		if (dev_spec->shadow_ram[i].modified) {
3255 			data = dev_spec->shadow_ram[i].value;
3256 		} else {
3257 			ret_val = e1000_read_flash_word_ich8lan(hw, i +
3258 								old_bank_offset,
3259 								&data);
3260 			if (ret_val)
3261 				break;
3262 		}
3263 
3264 		/* If the word is 0x13, then make sure the signature bits
3265 		 * (15:14) are 11b until the commit has completed.
3266 		 * This will allow us to write 10b which indicates the
3267 		 * signature is valid.  We want to do this after the write
3268 		 * has completed so that we don't mark the segment valid
3269 		 * while the write is still in progress
3270 		 */
3271 		if (i == E1000_ICH_NVM_SIG_WORD)
3272 			data |= E1000_ICH_NVM_SIG_MASK;
3273 
3274 		/* Convert offset to bytes. */
3275 		act_offset = (i + new_bank_offset) << 1;
3276 
3277 		usec_delay(100);
3278 		/* Write the bytes to the new bank. */
3279 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3280 							       act_offset,
3281 							       (u8)data);
3282 		if (ret_val)
3283 			break;
3284 
3285 		usec_delay(100);
3286 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3287 							  act_offset + 1,
3288 							  (u8)(data >> 8));
3289 		if (ret_val)
3290 			break;
3291 	}
3292 
3293 	/* Don't bother writing the segment valid bits if sector
3294 	 * programming failed.
3295 	 */
3296 	if (ret_val) {
3297 		DEBUGOUT("Flash commit failed.\n");
3298 		goto release;
3299 	}
3300 
3301 	/* Finally, validate the new segment by setting bits 15:14 of
3302 	 * word 0x13 to 10b.  This can be done without an erase because
3303 	 * the bits start out as 11b and only bit 14 needs to be cleared.
3304 	 */
3306 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
3307 	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
3308 	if (ret_val)
3309 		goto release;
3310 
3311 	data &= 0xBFFF;
3312 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3313 						       act_offset * 2 + 1,
3314 						       (u8)(data >> 8));
3315 	if (ret_val)
3316 		goto release;
3317 
3318 	/* And invalidate the previously valid segment by setting the
3319 	 * high byte of its signature word (0x13) to 0.  No erase is
3320 	 * needed because a write can only change bits from 1 to 0,
3321 	 * which is exactly the change being made.
3322 	 */
3323 	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
3324 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
3325 	if (ret_val)
3326 		goto release;
3327 
3328 	/* Great!  Everything worked, we can now clear the cached entries. */
3329 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3330 		dev_spec->shadow_ram[i].modified = FALSE;
3331 		dev_spec->shadow_ram[i].value = 0xFFFF;
3332 	}
3333 
3334 release:
3335 	nvm->ops.release(hw);
3336 
3337 	/* Reload the EEPROM, or else modifications will not appear
3338 	 * until after the next adapter reset.
3339 	 */
3340 	if (!ret_val) {
3341 		nvm->ops.reload(hw);
3342 		msec_delay(10);
3343 	}
3344 
3345 out:
3346 	if (ret_val)
3347 		DEBUGOUT1("NVM update error: %d\n", ret_val);
3348 
3349 	return ret_val;
3350 }
3351 
3352 /**
3353  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
3354  *  @hw: pointer to the HW structure
3355  *
3356  *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
3357  *  If the bit is 0, the EEPROM has been modified but the checksum was not
3358  *  calculated, in which case we need to calculate the checksum and set bit 6.
3359  **/
3360 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
3361 {
3362 	s32 ret_val;
3363 	u16 data;
3364 	u16 word;
3365 	u16 valid_csum_mask;
3366 
3367 	DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
3368 
3369 	/* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
3370 	 * the checksum needs to be fixed.  This bit is an indication that
3371 	 * the NVM was prepared by OEM software and did not calculate
3372 	 * the checksum...a likely scenario.
3373 	 */
3374 	switch (hw->mac.type) {
3375 	case e1000_pch_lpt:
3376 		word = NVM_COMPAT;
3377 		valid_csum_mask = NVM_COMPAT_VALID_CSUM;
3378 		break;
3379 	default:
3380 		word = NVM_FUTURE_INIT_WORD1;
3381 		valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
3382 		break;
3383 	}
3384 
3385 	ret_val = hw->nvm.ops.read(hw, word, 1, &data);
3386 	if (ret_val)
3387 		return ret_val;
3388 
3389 	if (!(data & valid_csum_mask)) {
3390 		data |= valid_csum_mask;
3391 		ret_val = hw->nvm.ops.write(hw, word, 1, &data);
3392 		if (ret_val)
3393 			return ret_val;
3394 		ret_val = hw->nvm.ops.update(hw);
3395 		if (ret_val)
3396 			return ret_val;
3397 	}
3398 
3399 	return e1000_validate_nvm_checksum_generic(hw);
3400 }
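
/*
 * Illustrative sketch (not part of the driver): on non-LPT parts the
 * valid-checksum bit lives in NVM_FUTURE_INIT_WORD1, and fixing it up is a
 * read-modify-write followed by a commit:
 *
 *	hw->nvm.ops.read(hw, NVM_FUTURE_INIT_WORD1, 1, &data);
 *	data |= NVM_FUTURE_INIT_WORD1_VALID_CSUM;
 *	hw->nvm.ops.write(hw, NVM_FUTURE_INIT_WORD1, 1, &data);
 *	hw->nvm.ops.update(hw);
 */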
3401 
3402 /**
3403  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
3404  *  @hw: pointer to the HW structure
3405  *  @offset: The offset (in bytes) of the byte/word to write.
3406  *  @size: Size of data to write, 1=byte 2=word
3407  *  @data: The byte(s) to write to the NVM.
3408  *
3409  *  Writes one/two bytes to the NVM using the flash access registers.
3410  **/
3411 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3412 					  u8 size, u16 data)
3413 {
3414 	union ich8_hws_flash_status hsfsts;
3415 	union ich8_hws_flash_ctrl hsflctl;
3416 	u32 flash_linear_addr;
3417 	u32 flash_data = 0;
3418 	s32 ret_val;
3419 	u8 count = 0;
3420 
3421 	DEBUGFUNC("e1000_write_flash_data_ich8lan");
3422 
3423 	if (size < 1 || size > 2 || data > size * 0xff ||
3424 	    offset > ICH_FLASH_LINEAR_ADDR_MASK)
3425 		return -E1000_ERR_NVM;
3426 
3427 	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3428 			    hw->nvm.flash_base_addr;
3429 
3430 	do {
3431 		usec_delay(1);
3432 		/* Steps */
3433 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3434 		if (ret_val != E1000_SUCCESS)
3435 			break;
3436 
3437 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3438 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3439 		hsflctl.hsf_ctrl.fldbcount = size - 1;
3440 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
3441 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3442 
3443 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3444 
3445 		if (size == 1)
3446 			flash_data = (u32)data & 0x00FF;
3447 		else
3448 			flash_data = (u32)data;
3449 
3450 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
3451 
3452 		/* Check if FCERR is set to 1.  If it is, clear it
3453 		 * and try the whole sequence a few more times; else done.
3454 		 */
3455 		ret_val = e1000_flash_cycle_ich8lan(hw,
3456 					       ICH_FLASH_WRITE_COMMAND_TIMEOUT);
3457 		if (ret_val == E1000_SUCCESS)
3458 			break;
3459 
3460 		/* If we're here, then things are most likely
3461 		 * completely hosed, but if the error condition
3462 		 * is detected, it won't hurt to give it another
3463 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
3464 		 */
3465 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3466 		if (hsfsts.hsf_status.flcerr)
3467 			/* Repeat for some time before giving up. */
3468 			continue;
3469 		if (!hsfsts.hsf_status.flcdone) {
3470 			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3471 			break;
3472 		}
3473 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3474 
3475 	return ret_val;
3476 }
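
/*
 * Illustrative usage (not part of the driver): size selects the flash cycle
 * width via fldbcount = size - 1, and the "data > size * 0xff" guard bounds
 * the value that may be passed for each width:
 *
 *	e1000_write_flash_data_ich8lan(hw, offset, 1, 0x00AB);	-- one byte
 *	e1000_write_flash_data_ich8lan(hw, offset, 2, 0x01AB);	-- two bytes
 *
 * Note the guard as written caps a two-byte value at 2 * 0xff (0x1FE),
 * not 0xFFFF.
 */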
3477 
3478 /**
3479  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
3480  *  @hw: pointer to the HW structure
3481  *  @offset: The index of the byte to write.
3482  *  @data: The byte to write to the NVM.
3483  *
3484  *  Writes a single byte to the NVM using the flash access registers.
3485  **/
3486 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3487 					  u8 data)
3488 {
3489 	u16 word = (u16)data;
3490 
3491 	DEBUGFUNC("e1000_write_flash_byte_ich8lan");
3492 
3493 	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
3494 }
3495 
3496 /**
3497  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
3498  *  @hw: pointer to the HW structure
3499  *  @offset: The offset of the byte to write.
3500  *  @byte: The byte to write to the NVM.
3501  *
3502  *  Writes a single byte to the NVM using the flash access registers.
3503  *  Goes through a retry algorithm before giving up.
3504  **/
3505 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
3506 						u32 offset, u8 byte)
3507 {
3508 	s32 ret_val;
3509 	u16 program_retries;
3510 
3511 	DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
3512 
3513 	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3514 	if (!ret_val)
3515 		return ret_val;
3516 
3517 	for (program_retries = 0; program_retries < 100; program_retries++) {
3518 		DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
3519 		usec_delay(100);
3520 		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3521 		if (ret_val == E1000_SUCCESS)
3522 			break;
3523 	}
3524 	if (program_retries == 100)
3525 		return -E1000_ERR_NVM;
3526 
3527 	return E1000_SUCCESS;
3528 }
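
/*
 * Illustrative usage (not part of the driver): callers use the retry variant
 * wherever a failed program cycle should not immediately abort, e.g. when
 * committing a shadow-RAM word one byte at a time:
 *
 *	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset,
 *						       (u8)data);
 *	if (!ret_val)
 *		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
 *							act_offset + 1,
 *							(u8)(data >> 8));
 */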
3529 
3530 /**
3531  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3532  *  @hw: pointer to the HW structure
3533  *  @bank: 0 for first bank, 1 for second bank, etc.
3534  *
3535  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
3536  *  Bank N starts at 4096 * N + flash_reg_addr.
3537  **/
3538 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3539 {
3540 	struct e1000_nvm_info *nvm = &hw->nvm;
3541 	union ich8_hws_flash_status hsfsts;
3542 	union ich8_hws_flash_ctrl hsflctl;
3543 	u32 flash_linear_addr;
3544 	/* bank size is in 16bit words - adjust to bytes */
3545 	u32 flash_bank_size = nvm->flash_bank_size * 2;
3546 	s32 ret_val;
3547 	s32 count = 0;
3548 	s32 j, iteration, sector_size;
3549 
3550 	DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
3551 
3552 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3553 
3554 	/* Determine HW Sector size: Read BERASE bits of hw flash status
3555 	 * register
3556 	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
3557 	 *     consecutive sectors.  The start index for the nth Hw sector
3558 	 *     can be calculated as = bank * 4096 + n * 256
3559 	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3560 	 *     The start index for the nth Hw sector can be calculated
3561 	 *     as = bank * 4096
3562 	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3563 	 *     (ich9 only, otherwise error condition)
3564 	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3565 	 */
3566 	switch (hsfsts.hsf_status.berasesz) {
3567 	case 0:
3568 		/* Hw sector size 256 */
3569 		sector_size = ICH_FLASH_SEG_SIZE_256;
3570 		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
3571 		break;
3572 	case 1:
3573 		sector_size = ICH_FLASH_SEG_SIZE_4K;
3574 		iteration = 1;
3575 		break;
3576 	case 2:
3577 		sector_size = ICH_FLASH_SEG_SIZE_8K;
3578 		iteration = 1;
3579 		break;
3580 	case 3:
3581 		sector_size = ICH_FLASH_SEG_SIZE_64K;
3582 		iteration = 1;
3583 		break;
3584 	default:
3585 		return -E1000_ERR_NVM;
3586 	}
3587 
3588 	/* Start with the base address, then add the sector offset. */
3589 	flash_linear_addr = hw->nvm.flash_base_addr;
3590 	flash_linear_addr += (bank) ? flash_bank_size : 0;
3591 
3592 	for (j = 0; j < iteration ; j++) {
3593 		do {
3594 			/* Steps */
3595 			ret_val = e1000_flash_cycle_init_ich8lan(hw);
3596 			if (ret_val)
3597 				return ret_val;
3598 
3599 			/* Write a value 11 (block Erase) in Flash
3600 			 * Cycle field in hw flash control
3601 			 */
3602 			hsflctl.regval = E1000_READ_FLASH_REG16(hw,
3603 							      ICH_FLASH_HSFCTL);
3604 			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3605 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3606 						hsflctl.regval);
3607 
3608 			/* Write the last 24 bits of an index within the
3609 			 * block into the Flash Linear address field of the
3610 			 * Flash Address register, computed from the bank
3611 			 * base so retries and later sectors stay correct.
3612 			 */
3613 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3614 					      flash_linear_addr + (j * sector_size));
3615 
3616 			ret_val = e1000_flash_cycle_ich8lan(hw,
3617 					       ICH_FLASH_ERASE_COMMAND_TIMEOUT);
3618 			if (ret_val == E1000_SUCCESS)
3619 				break;
3620 
3621 			/* Check if FCERR is set to 1.  If 1,
3622 			 * clear it and try the whole sequence
3623 			 * a few more times else Done
3624 			 */
3625 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3626 						      ICH_FLASH_HSFSTS);
3627 			if (hsfsts.hsf_status.flcerr)
3628 				/* repeat for some time before giving up */
3629 				continue;
3630 			else if (!hsfsts.hsf_status.flcdone)
3631 				return ret_val;
3632 		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3633 	}
3634 
3635 	return E1000_SUCCESS;
3636 }
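
/*
 * Worked example (illustrative): with a 4 KB bank (flash_bank_size of 2048
 * words, i.e. 4096 bytes) and berasesz == 00b (256-byte sectors),
 * iteration = 4096 / 256 = 16, so erasing bank 1 issues sixteen erase
 * cycles starting at flash_base_addr + 4096 and advancing 256 bytes per
 * iteration.
 */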
3637 
3638 /**
3639  *  e1000_valid_led_default_ich8lan - Set the default LED settings
3640  *  @hw: pointer to the HW structure
3641  *  @data: Pointer to the LED settings
3642  *
3643  *  Reads the LED default settings from the NVM to data.  If the NVM LED
3644  *  settings is all 0's or F's, set the LED default to a valid LED default
3645  *  setting.
3646  **/
3647 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3648 {
3649 	s32 ret_val;
3650 
3651 	DEBUGFUNC("e1000_valid_led_default_ich8lan");
3652 
3653 	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3654 	if (ret_val) {
3655 		DEBUGOUT("NVM Read Error\n");
3656 		return ret_val;
3657 	}
3658 
3659 	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
3660 		*data = ID_LED_DEFAULT_ICH8LAN;
3661 
3662 	return E1000_SUCCESS;
3663 }
3664 
3665 /**
3666  *  e1000_id_led_init_pchlan - store LED configurations
3667  *  @hw: pointer to the HW structure
3668  *
3669  *  PCH does not control LEDs via the LEDCTL register, rather it uses
3670  *  the PHY LED configuration register.
3671  *
3672  *  PCH also does not have an "always on" or "always off" mode which
3673  *  complicates the ID feature.  Instead of using the "on" mode to indicate
3674  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
3675  *  use "link_up" mode.  The LEDs will still ID on request if there is no
3676  *  link based on logic in e1000_led_[on|off]_pchlan().
3677  **/
3678 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
3679 {
3680 	struct e1000_mac_info *mac = &hw->mac;
3681 	s32 ret_val;
3682 	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
3683 	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
3684 	u16 data, i, temp, shift;
3685 
3686 	DEBUGFUNC("e1000_id_led_init_pchlan");
3687 
3688 	/* Get default ID LED modes */
3689 	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
3690 	if (ret_val)
3691 		return ret_val;
3692 
3693 	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
3694 	mac->ledctl_mode1 = mac->ledctl_default;
3695 	mac->ledctl_mode2 = mac->ledctl_default;
3696 
3697 	for (i = 0; i < 4; i++) {
3698 		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
3699 		shift = (i * 5);
3700 		switch (temp) {
3701 		case ID_LED_ON1_DEF2:
3702 		case ID_LED_ON1_ON2:
3703 		case ID_LED_ON1_OFF2:
3704 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3705 			mac->ledctl_mode1 |= (ledctl_on << shift);
3706 			break;
3707 		case ID_LED_OFF1_DEF2:
3708 		case ID_LED_OFF1_ON2:
3709 		case ID_LED_OFF1_OFF2:
3710 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3711 			mac->ledctl_mode1 |= (ledctl_off << shift);
3712 			break;
3713 		default:
3714 			/* Do nothing */
3715 			break;
3716 		}
3717 		switch (temp) {
3718 		case ID_LED_DEF1_ON2:
3719 		case ID_LED_ON1_ON2:
3720 		case ID_LED_OFF1_ON2:
3721 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3722 			mac->ledctl_mode2 |= (ledctl_on << shift);
3723 			break;
3724 		case ID_LED_DEF1_OFF2:
3725 		case ID_LED_ON1_OFF2:
3726 		case ID_LED_OFF1_OFF2:
3727 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3728 			mac->ledctl_mode2 |= (ledctl_off << shift);
3729 			break;
3730 		default:
3731 			/* Do nothing */
3732 			break;
3733 		}
3734 	}
3735 
3736 	return E1000_SUCCESS;
3737 }
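
/*
 * Worked example (illustrative): each 4-bit nibble of the NVM word selects
 * the behavior of one LED, while each LED field in the PHY LED config is
 * 5 bits wide.  For LED 2 that means extracting nibble 2 and rewriting
 * bits 14:10 (mode1 standing in for mac->ledctl_mode1):
 *
 *	temp  = (data >> (2 << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
 *	shift = 2 * 5;
 *	mode1 = (mode1 & ~(E1000_PHY_LED0_MASK << shift)) |
 *		(ledctl_on << shift);
 */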
3738 
3739 /**
3740  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
3741  *  @hw: pointer to the HW structure
3742  *
3743  *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
3744  *  register, so the bus width is hard coded.
3745  **/
3746 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3747 {
3748 	struct e1000_bus_info *bus = &hw->bus;
3749 	s32 ret_val;
3750 
3751 	DEBUGFUNC("e1000_get_bus_info_ich8lan");
3752 
3753 	ret_val = e1000_get_bus_info_pcie_generic(hw);
3754 
3755 	/* ICH devices are "PCI Express"-ish.  They have
3756 	 * a configuration space, but do not contain
3757 	 * PCI Express Capability registers, so bus width
3758 	 * must be hardcoded.
3759 	 */
3760 	if (bus->width == e1000_bus_width_unknown)
3761 		bus->width = e1000_bus_width_pcie_x1;
3762 
3763 	return ret_val;
3764 }
3765 
3766 /**
3767  *  e1000_reset_hw_ich8lan - Reset the hardware
3768  *  @hw: pointer to the HW structure
3769  *
3770  *  Does a full reset of the hardware which includes a reset of the PHY and
3771  *  MAC.
3772  **/
3773 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3774 {
3775 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3776 	u16 kum_cfg;
3777 	u32 ctrl, reg;
3778 	s32 ret_val;
3779 
3780 	DEBUGFUNC("e1000_reset_hw_ich8lan");
3781 
3782 	/* Prevent the PCI-E bus from sticking if there is no TLP connection
3783 	 * on the last TLP read/write transaction when MAC is reset.
3784 	 */
3785 	ret_val = e1000_disable_pcie_master_generic(hw);
3786 	if (ret_val)
3787 		DEBUGOUT("PCI-E Master disable polling has failed.\n");
3788 
3789 	DEBUGOUT("Masking off all interrupts\n");
3790 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3791 
3792 	/* Disable the Transmit and Receive units.  Then delay to allow
3793 	 * any pending transactions to complete before we hit the MAC
3794 	 * with the global reset.
3795 	 */
3796 	E1000_WRITE_REG(hw, E1000_RCTL, 0);
3797 	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
3798 	E1000_WRITE_FLUSH(hw);
3799 
3800 	msec_delay(10);
3801 
3802 	/* Workaround for ICH8 bit corruption issue in FIFO memory */
3803 	if (hw->mac.type == e1000_ich8lan) {
3804 		/* Set Tx and Rx buffer allocation to 8k apiece. */
3805 		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
3806 		/* Set Packet Buffer Size to 16k. */
3807 		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
3808 	}
3809 
3810 	if (hw->mac.type == e1000_pchlan) {
3811 		/* Save the NVM K1 bit setting */
3812 		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
3813 		if (ret_val)
3814 			return ret_val;
3815 
3816 		if (kum_cfg & E1000_NVM_K1_ENABLE)
3817 			dev_spec->nvm_k1_enabled = TRUE;
3818 		else
3819 			dev_spec->nvm_k1_enabled = FALSE;
3820 	}
3821 
3822 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
3823 
3824 	if (!hw->phy.ops.check_reset_block(hw)) {
3825 		/* Full-chip reset requires MAC and PHY reset at the same
3826 		 * time to make sure the interface between MAC and the
3827 		 * external PHY is reset.
3828 		 */
3829 		ctrl |= E1000_CTRL_PHY_RST;
3830 
3831 		/* Gate automatic PHY configuration by hardware on
3832 		 * non-managed 82579
3833 		 */
3834 		if ((hw->mac.type == e1000_pch2lan) &&
3835 		    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3836 			e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3837 	}
3838 	ret_val = e1000_acquire_swflag_ich8lan(hw);
3839 	DEBUGOUT("Issuing a global reset to ich8lan\n");
3840 	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
3841 	/* cannot issue a flush here because it hangs the hardware */
3842 	msec_delay(20);
3843 
3844 	/* Set Phy Config Counter to 50msec */
3845 	if (hw->mac.type == e1000_pch2lan) {
3846 		reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
3847 		reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
3848 		reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
3849 		E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
3850 	}
3851 
3852 	if (ctrl & E1000_CTRL_PHY_RST) {
3853 		ret_val = hw->phy.ops.get_cfg_done(hw);
3854 		if (ret_val)
3855 			return ret_val;
3856 
3857 		ret_val = e1000_post_phy_reset_ich8lan(hw);
3858 		if (ret_val)
3859 			return ret_val;
3860 	}
3861 
3862 	/* For PCH, this write will make sure that any noise
3863 	 * will be detected as a CRC error and be dropped rather than show up
3864 	 * as a bad packet to the DMA engine.
3865 	 */
3866 	if (hw->mac.type == e1000_pchlan)
3867 		E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
3868 
3869 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3870 	E1000_READ_REG(hw, E1000_ICR);
3871 
3872 	reg = E1000_READ_REG(hw, E1000_KABGTXD);
3873 	reg |= E1000_KABGTXD_BGSQLBIAS;
3874 	E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
3875 
3876 	return E1000_SUCCESS;
3877 }
3878 
3879 /**
3880  *  e1000_init_hw_ich8lan - Initialize the hardware
3881  *  @hw: pointer to the HW structure
3882  *
3883  *  Prepares the hardware for transmit and receive by doing the following:
3884  *   - initialize hardware bits
3885  *   - initialize LED identification
3886  *   - setup receive address registers
3887  *   - setup flow control
3888  *   - setup transmit descriptors
3889  *   - clear statistics
3890  **/
3891 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3892 {
3893 	struct e1000_mac_info *mac = &hw->mac;
3894 	u32 ctrl_ext, txdctl, snoop;
3895 	s32 ret_val;
3896 	u16 i;
3897 
3898 	DEBUGFUNC("e1000_init_hw_ich8lan");
3899 
3900 	e1000_initialize_hw_bits_ich8lan(hw);
3901 
3902 	/* Initialize identification LED */
3903 	ret_val = mac->ops.id_led_init(hw);
3904 	/* An error is not fatal and we should not stop init due to this */
3905 	if (ret_val)
3906 		DEBUGOUT("Error initializing identification LED\n");
3907 
3908 	/* Setup the receive address. */
3909 	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
3910 
3911 	/* Zero out the Multicast HASH table */
3912 	DEBUGOUT("Zeroing the MTA\n");
3913 	for (i = 0; i < mac->mta_reg_count; i++)
3914 		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
3915 
3916 	/* The 82578 Rx buffer will stall if wakeup is enabled in host and
3917 	 * the ME.  Disable wakeup by clearing the host wakeup bit.
3918 	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
3919 	 */
3920 	if (hw->phy.type == e1000_phy_82578) {
3921 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
3922 		i &= ~BM_WUC_HOST_WU_BIT;
3923 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
3924 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
3925 		if (ret_val)
3926 			return ret_val;
3927 	}
3928 
3929 	/* Setup link and flow control */
3930 	ret_val = mac->ops.setup_link(hw);
3931 
3932 	/* Set the transmit descriptor write-back policy for both queues */
3933 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
3934 	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3935 		 E1000_TXDCTL_FULL_TX_DESC_WB;
3936 	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3937 		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3938 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
3939 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
3940 	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3941 		 E1000_TXDCTL_FULL_TX_DESC_WB;
3942 	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3943 		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3944 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
3945 
3946 	/* ICH8 has opposite polarity of no_snoop bits.
3947 	 * By default, we should use snoop behavior.
3948 	 */
3949 	if (mac->type == e1000_ich8lan)
3950 		snoop = PCIE_ICH8_SNOOP_ALL;
3951 	else
3952 		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
3953 	e1000_set_pcie_no_snoop_generic(hw, snoop);
3954 
3955 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
3956 	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
3957 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
3958 
3959 	/* Clear all of the statistics registers (clear on read).  It is
3960 	 * important that we do this after we have tried to establish link
3961 	 * because the symbol error count will increment wildly if there
3962 	 * is no link.
3963 	 */
3964 	e1000_clear_hw_cntrs_ich8lan(hw);
3965 
3966 	return ret_val;
3967 }
3968 
3969 /**
3970  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
3971  *  @hw: pointer to the HW structure
3972  *
3973  *  Sets/Clears required hardware bits necessary for correctly setting up the
3974  *  hardware for transmit and receive.
3975  **/
3976 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3977 {
3978 	u32 reg;
3979 
3980 	DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
3981 
3982 	/* Extended Device Control */
3983 	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
3984 	reg |= (1 << 22);
3985 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
3986 	if (hw->mac.type >= e1000_pchlan)
3987 		reg |= E1000_CTRL_EXT_PHYPDEN;
3988 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
3989 
3990 	/* Transmit Descriptor Control 0 */
3991 	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
3992 	reg |= (1 << 22);
3993 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
3994 
3995 	/* Transmit Descriptor Control 1 */
3996 	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
3997 	reg |= (1 << 22);
3998 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
3999 
4000 	/* Transmit Arbitration Control 0 */
4001 	reg = E1000_READ_REG(hw, E1000_TARC(0));
4002 	if (hw->mac.type == e1000_ich8lan)
4003 		reg |= (1 << 28) | (1 << 29);
4004 	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
4005 	E1000_WRITE_REG(hw, E1000_TARC(0), reg);
4006 
4007 	/* Transmit Arbitration Control 1 */
4008 	reg = E1000_READ_REG(hw, E1000_TARC(1));
4009 	if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
4010 		reg &= ~(1 << 28);
4011 	else
4012 		reg |= (1 << 28);
4013 	reg |= (1 << 24) | (1 << 26) | (1 << 30);
4014 	E1000_WRITE_REG(hw, E1000_TARC(1), reg);
4015 
4016 	/* Device Status */
4017 	if (hw->mac.type == e1000_ich8lan) {
4018 		reg = E1000_READ_REG(hw, E1000_STATUS);
4019 		reg &= ~(1 << 31);
4020 		E1000_WRITE_REG(hw, E1000_STATUS, reg);
4021 	}
4022 
4023 	/* Work around a descriptor data corruption issue seen with NFSv2 UDP
4024 	 * traffic by simply disabling the NFS filtering capability.
4025 	 */
4026 	reg = E1000_READ_REG(hw, E1000_RFCTL);
4027 	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
4028 	/* Disable IPv6 extension header parsing because some malformed
4029 	 * IPv6 headers can hang the Rx.
4030 	 */
4031 	if (hw->mac.type == e1000_ich8lan)
4032 		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
4033 	E1000_WRITE_REG(hw, E1000_RFCTL, reg);
4034 
4035 	/* Enable ECC on Lynxpoint */
4036 	if (hw->mac.type == e1000_pch_lpt) {
4037 		reg = E1000_READ_REG(hw, E1000_PBECCSTS);
4038 		reg |= E1000_PBECCSTS_ECC_ENABLE;
4039 		E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
4040 
4041 		reg = E1000_READ_REG(hw, E1000_CTRL);
4042 		reg |= E1000_CTRL_MEHE;
4043 		E1000_WRITE_REG(hw, E1000_CTRL, reg);
4044 	}
4045 
4046 	return;
4047 }
4048 
4049 /**
4050  *  e1000_setup_link_ich8lan - Setup flow control and link settings
4051  *  @hw: pointer to the HW structure
4052  *
4053  *  Determines which flow control settings to use, then configures flow
4054  *  control.  Calls the appropriate media-specific link configuration
4055  *  function.  Assuming the adapter has a valid link partner, a valid link
4056  *  should be established.  Assumes the hardware has previously been reset
4057  *  and the transmitter and receiver are not enabled.
4058  **/
4059 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
4060 {
4061 	s32 ret_val;
4062 
4063 	DEBUGFUNC("e1000_setup_link_ich8lan");
4064 
4065 	if (hw->phy.ops.check_reset_block(hw))
4066 		return E1000_SUCCESS;
4067 
4068 	/* ICH parts do not have a word in the NVM to determine
4069 	 * the default flow control setting, so we explicitly
4070 	 * set it to full.
4071 	 */
4072 	if (hw->fc.requested_mode == e1000_fc_default)
4073 		hw->fc.requested_mode = e1000_fc_full;
4074 
4075 	/* Save off the requested flow control mode for use later.  Depending
4076 	 * on the link partner's capabilities, we may or may not use this mode.
4077 	 */
4078 	hw->fc.current_mode = hw->fc.requested_mode;
4079 
4080 	DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
4081 		hw->fc.current_mode);
4082 
4083 	/* Continue to configure the copper link. */
4084 	ret_val = hw->mac.ops.setup_physical_interface(hw);
4085 	if (ret_val)
4086 		return ret_val;
4087 
4088 	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
4089 	if ((hw->phy.type == e1000_phy_82578) ||
4090 	    (hw->phy.type == e1000_phy_82579) ||
4091 	    (hw->phy.type == e1000_phy_i217) ||
4092 	    (hw->phy.type == e1000_phy_82577)) {
4093 		E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
4094 
4095 		ret_val = hw->phy.ops.write_reg(hw,
4096 					     PHY_REG(BM_PORT_CTRL_PAGE, 27),
4097 					     hw->fc.pause_time);
4098 		if (ret_val)
4099 			return ret_val;
4100 	}
4101 
4102 	return e1000_set_fc_watermarks_generic(hw);
4103 }
4104 
4105 /**
4106  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
4107  *  @hw: pointer to the HW structure
4108  *
4109  *  Configures the Kumeran interface to the PHY to wait the appropriate time
4110  *  when polling the PHY, then calls the generic setup_copper_link to finish
4111  *  configuring the copper link.
4112  **/
4113 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
4114 {
4115 	u32 ctrl;
4116 	s32 ret_val;
4117 	u16 reg_data;
4118 
4119 	DEBUGFUNC("e1000_setup_copper_link_ich8lan");
4120 
4121 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
4122 	ctrl |= E1000_CTRL_SLU;
4123 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4124 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4125 
4126 	/* Set the mac to wait the maximum time between each iteration
4127 	 * and increase the max iterations when polling the phy;
4128 	 * this fixes erroneous timeouts at 10Mbps.
4129 	 */
4130 	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
4131 					       0xFFFF);
4132 	if (ret_val)
4133 		return ret_val;
4134 	ret_val = e1000_read_kmrn_reg_generic(hw,
4135 					      E1000_KMRNCTRLSTA_INBAND_PARAM,
4136 					      &reg_data);
4137 	if (ret_val)
4138 		return ret_val;
4139 	reg_data |= 0x3F;
4140 	ret_val = e1000_write_kmrn_reg_generic(hw,
4141 					       E1000_KMRNCTRLSTA_INBAND_PARAM,
4142 					       reg_data);
4143 	if (ret_val)
4144 		return ret_val;
4145 
4146 	switch (hw->phy.type) {
4147 	case e1000_phy_igp_3:
4148 		ret_val = e1000_copper_link_setup_igp(hw);
4149 		if (ret_val)
4150 			return ret_val;
4151 		break;
4152 	case e1000_phy_bm:
4153 	case e1000_phy_82578:
4154 		ret_val = e1000_copper_link_setup_m88(hw);
4155 		if (ret_val)
4156 			return ret_val;
4157 		break;
4158 	case e1000_phy_82577:
4159 	case e1000_phy_82579:
4160 		ret_val = e1000_copper_link_setup_82577(hw);
4161 		if (ret_val)
4162 			return ret_val;
4163 		break;
4164 	case e1000_phy_ife:
4165 		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
4166 					       &reg_data);
4167 		if (ret_val)
4168 			return ret_val;
4169 
4170 		reg_data &= ~IFE_PMC_AUTO_MDIX;
4171 
4172 		switch (hw->phy.mdix) {
4173 		case 1:
4174 			reg_data &= ~IFE_PMC_FORCE_MDIX;
4175 			break;
4176 		case 2:
4177 			reg_data |= IFE_PMC_FORCE_MDIX;
4178 			break;
4179 		case 0:
4180 		default:
4181 			reg_data |= IFE_PMC_AUTO_MDIX;
4182 			break;
4183 		}
4184 		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
4185 						reg_data);
4186 		if (ret_val)
4187 			return ret_val;
4188 		break;
4189 	default:
4190 		break;
4191 	}
4192 
4193 	return e1000_setup_copper_link_generic(hw);
4194 }
4195 
4196 /**
4197  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
4198  *  @hw: pointer to the HW structure
4199  *
4200  *  Calls the PHY specific link setup function and then calls the
4201  *  generic setup_copper_link to finish configuring the link for
4202  *  Lynxpoint PCH devices
4203  **/
4204 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
4205 {
4206 	u32 ctrl;
4207 	s32 ret_val;
4208 
4209 	DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
4210 
4211 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
4212 	ctrl |= E1000_CTRL_SLU;
4213 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4214 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4215 
4216 	ret_val = e1000_copper_link_setup_82577(hw);
4217 	if (ret_val)
4218 		return ret_val;
4219 
4220 	return e1000_setup_copper_link_generic(hw);
4221 }
4222 
4223 /**
4224  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
4225  *  @hw: pointer to the HW structure
4226  *  @speed: pointer to store current link speed
4227  *  @duplex: pointer to store the current link duplex
4228  *
4229  *  Calls the generic get_speed_and_duplex to retrieve the current link
4230  *  information and then calls the Kumeran lock loss workaround for links at
4231  *  gigabit speeds.
4232  **/
4233 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
4234 					  u16 *duplex)
4235 {
4236 	s32 ret_val;
4237 
4238 	DEBUGFUNC("e1000_get_link_up_info_ich8lan");
4239 
4240 	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
4241 	if (ret_val)
4242 		return ret_val;
4243 
4244 	if ((hw->mac.type == e1000_ich8lan) &&
4245 	    (hw->phy.type == e1000_phy_igp_3) &&
4246 	    (*speed == SPEED_1000)) {
4247 		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
4248 	}
4249 
4250 	return ret_val;
4251 }
4252 
4253 /**
4254  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
4255  *  @hw: pointer to the HW structure
4256  *
4257  *  Work-around for 82566 Kumeran PCS lock loss:
4258  *  On link status change (i.e. PCI reset, speed change) and link is up and
4259  *  speed is gigabit-
4260  *  speed is gigabit:
4261  *    1) wait 1ms for Kumeran link to come up
4262  *    2) check Kumeran Diagnostic register PCS lock loss bit
4263  *    3) if not set the link is locked (all is good), otherwise...
4264  *    4) reset the PHY
4265  *    5) repeat up to 10 times
4266  *  Note: this is only called for IGP3 copper when speed is 1gb.
4267  **/
4268 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
4269 {
4270 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4271 	u32 phy_ctrl;
4272 	s32 ret_val;
4273 	u16 i, data;
4274 	bool link;
4275 
4276 	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
4277 
4278 	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
4279 		return E1000_SUCCESS;
4280 
4281 	/* Make sure link is up before proceeding.  If not, just return.
4282 	 * Attempting this while the link is negotiating fouls up link
4283 	 * stability.
4284 	 */
4285 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
4286 	if (!link)
4287 		return E1000_SUCCESS;
4288 
4289 	for (i = 0; i < 10; i++) {
4290 		/* read once to clear */
4291 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4292 		if (ret_val)
4293 			return ret_val;
4294 		/* and again to get new status */
4295 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4296 		if (ret_val)
4297 			return ret_val;
4298 
4299 		/* check for PCS lock */
4300 		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
4301 			return E1000_SUCCESS;
4302 
4303 		/* Issue PHY reset */
4304 		hw->phy.ops.reset(hw);
4305 		msec_delay_irq(5);
4306 	}
4307 	/* Disable GigE link negotiation */
4308 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4309 	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
4310 		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4311 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4312 
4313 	/* Call gig speed drop workaround on Gig disable before accessing
4314 	 * any PHY registers
4315 	 */
4316 	e1000_gig_downshift_workaround_ich8lan(hw);
4317 
4318 	/* unable to acquire PCS lock */
4319 	return -E1000_ERR_PHY;
4320 }
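
/*
 * Illustrative sketch (not part of the driver): IGP3_KMRN_DIAG latches the
 * PCS lock-loss indication, which is why the loop above reads it twice --
 * once to clear the stale latched value and once for the live status:
 *
 *	hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);	-- clears latch
 *	hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);	-- live status
 *	locked = !(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS);
 */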
4321 
4322 /**
4323  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
4324  *  @hw: pointer to the HW structure
4325  *  @state: boolean value used to set the current Kumeran workaround state
4326  *
4327  *  If ICH8, set the current Kumeran workaround state (enabled - TRUE
4328  *  /disabled - FALSE).
4329  **/
4330 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
4331 						 bool state)
4332 {
4333 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4334 
4335 	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
4336 
4337 	if (hw->mac.type != e1000_ich8lan) {
4338 		DEBUGOUT("Workaround applies to ICH8 only.\n");
4339 		return;
4340 	}
4341 
4342 	dev_spec->kmrn_lock_loss_workaround_enabled = state;
4343 
4344 	return;
4345 }
4346 
4347 /**
4348  *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
4349  *  @hw: pointer to the HW structure
4350  *
4351  *  Workaround for 82566 power-down on D3 entry:
4352  *    1) disable gigabit link
4353  *    2) write VR power-down enable
4354  *    3) read it back
4355  *  Continue if successful, else issue LCD reset and repeat
4356  **/
4357 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
4358 {
4359 	u32 reg;
4360 	u16 data;
4361 	u8  retry = 0;
4362 
4363 	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
4364 
4365 	if (hw->phy.type != e1000_phy_igp_3)
4366 		return;
4367 
4368 	/* Try the workaround twice (if needed) */
4369 	do {
4370 		/* Disable link */
4371 		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
4372 		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
4373 			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4374 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
4375 
4376 		/* Call gig speed drop workaround on Gig disable before
4377 		 * accessing any PHY registers
4378 		 */
4379 		if (hw->mac.type == e1000_ich8lan)
4380 			e1000_gig_downshift_workaround_ich8lan(hw);
4381 
4382 		/* Write VR power-down enable */
4383 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4384 		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4385 		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
4386 				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);
4387 
4388 		/* Read it back and test */
4389 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4390 		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4391 		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
4392 			break;
4393 
4394 		/* Issue PHY reset and repeat at most one more time */
4395 		reg = E1000_READ_REG(hw, E1000_CTRL);
4396 		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
4397 		retry++;
4398 	} while (retry);
4399 }
4400 
4401 /**
4402  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
4403  *  @hw: pointer to the HW structure
4404  *
4405  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
4406  *  LPLU, Gig disable, MDIC PHY reset):
4407  *    1) Set Kumeran Near-end loopback
4408  *    2) Clear Kumeran Near-end loopback
4409  *  Should only be called for ICH8[m] devices with any 1G Phy.
4410  **/
4411 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
4412 {
4413 	s32 ret_val;
4414 	u16 reg_data;
4415 
4416 	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
4417 
4418 	if ((hw->mac.type != e1000_ich8lan) ||
4419 	    (hw->phy.type == e1000_phy_ife))
4420 		return;
4421 
4422 	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4423 					      &reg_data);
4424 	if (ret_val)
4425 		return;
4426 	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
4427 	ret_val = e1000_write_kmrn_reg_generic(hw,
4428 					       E1000_KMRNCTRLSTA_DIAG_OFFSET,
4429 					       reg_data);
4430 	if (ret_val)
4431 		return;
4432 	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
4433 	e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4434 				     reg_data);
4435 }
4436 
4437 /**
4438  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
4439  *  @hw: pointer to the HW structure
4440  *
4441  *  During S0 to Sx transition, it is possible the link remains at gig
4442  *  instead of negotiating to a lower speed.  Before going to Sx, set
4443  *  'Gig Disable' to force link speed negotiation to a lower speed based on
4444  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
4445  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
4446  *  needs to be written.
4447  *  Parts that support (and are linked to a partner which supports) EEE in
4448  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
4449  *  than 10Mbps w/o EEE.
4450  **/
4451 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4452 {
4453 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4454 	u32 phy_ctrl;
4455 	s32 ret_val;
4456 
4457 	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
4458 
4459 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4460 	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
4461 
4462 	if (hw->phy.type == e1000_phy_i217) {
4463 		u16 phy_reg, device_id = hw->device_id;
4464 
4465 		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
4466 		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
4467 			u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
4468 
4469 			E1000_WRITE_REG(hw, E1000_FEXTNVM6,
4470 					fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
4471 		}
4472 
4473 		ret_val = hw->phy.ops.acquire(hw);
4474 		if (ret_val)
4475 			goto out;
4476 
4477 		if (!dev_spec->eee_disable) {
4478 			u16 eee_advert;
4479 
4480 			ret_val =
4481 			    e1000_read_emi_reg_locked(hw,
4482 						      I217_EEE_ADVERTISEMENT,
4483 						      &eee_advert);
4484 			if (ret_val)
4485 				goto release;
4486 
4487 			/* Disable LPLU if both link partners support 100BaseT
4488 			 * EEE and 100Full is advertised on both ends of the
4489 			 * link.
4490 			 */
4491 			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
4492 			    (dev_spec->eee_lp_ability &
4493 			     I82579_EEE_100_SUPPORTED) &&
4494 			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL))
4495 				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
4496 					      E1000_PHY_CTRL_NOND0A_LPLU);
4497 		}
4498 
4499 		/* For i217 Intel Rapid Start Technology support,
4500 		 * when the system is going into Sx and no manageability engine
4501 		 * is present, the driver must configure proxy to reset only on
4502 		 * power good.  LPI (Low Power Idle) state must also reset only
4503 		 * on power good, as well as the MTA (Multicast table array).
4504 		 * The SMBus release must also be disabled on LCD reset.
4505 		 */
4506 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
4507 			E1000_ICH_FWSM_FW_VALID)) {
4508 			/* Enable proxy to reset only on power good. */
4509 			hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
4510 						    &phy_reg);
4511 			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
4512 			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
4513 						     phy_reg);
4514 
4515 			/* Set the LPI (EEE) enable bit to reset only on
4516 			 * power good.
4517 			 */
4518 			hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
4519 			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
4520 			hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
4521 
4522 			/* Disable the SMB release on LCD reset. */
4523 			hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
4524 			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
4525 			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
4526 		}
4527 
4528 		/* Enable MTA to reset for Intel Rapid Start Technology
4529 		 * Support
4530 		 */
4531 		hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
4532 		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
4533 		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4534 
4535 release:
4536 		hw->phy.ops.release(hw);
4537 	}
4538 out:
4539 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4540 
4541 	if (hw->mac.type == e1000_ich8lan)
4542 		e1000_gig_downshift_workaround_ich8lan(hw);
4543 
4544 	if (hw->mac.type >= e1000_pchlan) {
4545 		e1000_oem_bits_config_ich8lan(hw, FALSE);
4546 
4547 		/* Reset PHY to activate OEM bits on 82577/8 */
4548 		if (hw->mac.type == e1000_pchlan)
4549 			e1000_phy_hw_reset_generic(hw);
4550 
4551 		ret_val = hw->phy.ops.acquire(hw);
4552 		if (ret_val)
4553 			return;
4554 		e1000_write_smbus_addr(hw);
4555 		hw->phy.ops.release(hw);
4556 	}
4557 
4558 	return;
4559 }
4560 
4561 /**
4562  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
4563  *  @hw: pointer to the HW structure
4564  *
4565  *  During Sx to S0 transitions on non-managed devices or managed devices
4566  *  on which PHY resets are not blocked, if the PHY registers cannot be
4567  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
4568  *  the PHY.
4569  *  On i217, setup Intel Rapid Start Technology.
4570  **/
4571 void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
4572 {
4573 	s32 ret_val;
4574 
4575 	DEBUGFUNC("e1000_resume_workarounds_pchlan");
4576 
4577 	if (hw->mac.type < e1000_pch2lan)
4578 		return;
4579 
4580 	ret_val = e1000_init_phy_workarounds_pchlan(hw);
4581 	if (ret_val) {
4582 		DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
4583 		return;
4584 	}
4585 
4586 	/* For i217 Intel Rapid Start Technology support when the system
4587 	 * is transitioning from Sx and no manageability engine is present
4588 	 * configure SMBus to restore on reset, disable proxy, and enable
4589 	 * the reset on MTA (Multicast table array).
4590 	 */
4591 	if (hw->phy.type == e1000_phy_i217) {
4592 		u16 phy_reg;
4593 
4594 		ret_val = hw->phy.ops.acquire(hw);
4595 		if (ret_val) {
4596 			DEBUGOUT("Failed to setup iRST\n");
4597 			return;
4598 		}
4599 
4600 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
4601 		    E1000_ICH_FWSM_FW_VALID)) {
4602 			/* Restore clear on SMB if no manageability engine
4603 			 * is present
4604 			 */
4605 			ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
4606 							      &phy_reg);
4607 			if (ret_val)
4608 				goto release;
4609 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
4610 			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
4611 
4612 			/* Disable Proxy */
4613 			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
4614 		}
4615 		/* Enable reset on MTA */
4616 		ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
4617 						      &phy_reg);
4618 		if (ret_val)
4619 			goto release;
4620 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
4621 		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4622 release:
4623 		if (ret_val)
4624 			DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
4625 		hw->phy.ops.release(hw);
4626 	}
4627 }
4628 
4629 /**
4630  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
4631  *  @hw: pointer to the HW structure
4632  *
4633  *  Return the LED back to the default configuration.
4634  **/
4635 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
4636 {
4637 	DEBUGFUNC("e1000_cleanup_led_ich8lan");
4638 
4639 	if (hw->phy.type == e1000_phy_ife)
4640 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4641 					     0);
4642 
4643 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
4644 	return E1000_SUCCESS;
4645 }
4646 
4647 /**
4648  *  e1000_led_on_ich8lan - Turn LEDs on
4649  *  @hw: pointer to the HW structure
4650  *
4651  *  Turn on the LEDs.
4652  **/
4653 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
4654 {
4655 	DEBUGFUNC("e1000_led_on_ich8lan");
4656 
4657 	if (hw->phy.type == e1000_phy_ife)
4658 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4659 				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
4660 
4661 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
4662 	return E1000_SUCCESS;
4663 }
4664 
4665 /**
4666  *  e1000_led_off_ich8lan - Turn LEDs off
4667  *  @hw: pointer to the HW structure
4668  *
4669  *  Turn off the LEDs.
4670  **/
4671 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
4672 {
4673 	DEBUGFUNC("e1000_led_off_ich8lan");
4674 
4675 	if (hw->phy.type == e1000_phy_ife)
4676 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4677 			       (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
4678 
4679 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
4680 	return E1000_SUCCESS;
4681 }
4682 
4683 /**
4684  *  e1000_setup_led_pchlan - Configures SW controllable LED
4685  *  @hw: pointer to the HW structure
4686  *
4687  *  This prepares the SW controllable LED for use.
4688  **/
4689 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
4690 {
4691 	DEBUGFUNC("e1000_setup_led_pchlan");
4692 
4693 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
4694 				     (u16)hw->mac.ledctl_mode1);
4695 }
4696 
4697 /**
4698  *  e1000_cleanup_led_pchlan - Restore the default LED operation
4699  *  @hw: pointer to the HW structure
4700  *
4701  *  Return the LED back to the default configuration.
4702  **/
4703 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
4704 {
4705 	DEBUGFUNC("e1000_cleanup_led_pchlan");
4706 
4707 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
4708 				     (u16)hw->mac.ledctl_default);
4709 }
4710 
4711 /**
4712  *  e1000_led_on_pchlan - Turn LEDs on
4713  *  @hw: pointer to the HW structure
4714  *
4715  *  Turn on the LEDs.
4716  **/
4717 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
4718 {
4719 	u16 data = (u16)hw->mac.ledctl_mode2;
4720 	u32 i, led;
4721 
4722 	DEBUGFUNC("e1000_led_on_pchlan");
4723 
4724 	/* If no link, then turn LED on by setting the invert bit
4725 	 * for each LED whose mode is "link_up" in ledctl_mode2.
4726 	 */
4727 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
4728 		for (i = 0; i < 3; i++) {
4729 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4730 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
4731 			    E1000_LEDCTL_MODE_LINK_UP)
4732 				continue;
4733 			if (led & E1000_PHY_LED0_IVRT)
4734 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4735 			else
4736 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
4737 		}
4738 	}
4739 
4740 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4741 }
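
/*
 * Worked example (illustrative): a "link_up" LED is forced on while there is
 * no link by flipping its invert bit.  For LED 1 (bits 9:5 of HV_LED_CONFIG)
 * the set-or-clear pair above is equivalent to:
 *
 *	led = (data >> 5) & E1000_PHY_LED0_MASK;
 *	if ((led & E1000_PHY_LED0_MODE_MASK) == E1000_LEDCTL_MODE_LINK_UP)
 *		data ^= (E1000_PHY_LED0_IVRT << 5);
 */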
4742 
4743 /**
4744  *  e1000_led_off_pchlan - Turn LEDs off
4745  *  @hw: pointer to the HW structure
4746  *
4747  *  Turn off the LEDs.
4748  **/
4749 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
4750 {
4751 	u16 data = (u16)hw->mac.ledctl_mode1;
4752 	u32 i, led;
4753 
4754 	DEBUGFUNC("e1000_led_off_pchlan");
4755 
4756 	/* If no link, then turn LED off by clearing the invert bit
4757 	 * for each LED whose mode is "link_up" in ledctl_mode1.
4758 	 */
4759 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
4760 		for (i = 0; i < 3; i++) {
4761 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4762 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
4763 			    E1000_LEDCTL_MODE_LINK_UP)
4764 				continue;
4765 			if (led & E1000_PHY_LED0_IVRT)
4766 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4767 			else
4768 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
4769 		}
4770 	}
4771 
4772 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4773 }
4774 
4775 /**
4776  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
4777  *  @hw: pointer to the HW structure
4778  *
4779  *  Read appropriate register for the config done bit for completion status
4780  *  and configure the PHY through s/w for EEPROM-less parts.
4781  *
4782  *  NOTE: some silicon which is EEPROM-less will fail trying to read the
4783  *  config done bit, so only an error is logged and execution continues.  If we were
4784  *  to return with error, EEPROM-less silicon would not be able to be reset
4785  *  or change link.
4786  **/
4787 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
4788 {
4789 	s32 ret_val = E1000_SUCCESS;
4790 	u32 bank = 0;
4791 	u32 status;
4792 
4793 	DEBUGFUNC("e1000_get_cfg_done_ich8lan");
4794 
4795 	e1000_get_cfg_done_generic(hw);
4796 
4797 	/* Wait for indication from h/w that it has completed basic config */
4798 	if (hw->mac.type >= e1000_ich10lan) {
4799 		e1000_lan_init_done_ich8lan(hw);
4800 	} else {
4801 		ret_val = e1000_get_auto_rd_done_generic(hw);
4802 		if (ret_val) {
4803 			/* When auto config read does not complete, do not
4804 			 * return with an error. This can happen in situations
4805 			 * where there is no eeprom and prevents getting link.
4806 			 */
4807 			DEBUGOUT("Auto Read Done did not complete\n");
4808 			ret_val = E1000_SUCCESS;
4809 		}
4810 	}
4811 
4812 	/* Clear PHY Reset Asserted bit */
4813 	status = E1000_READ_REG(hw, E1000_STATUS);
4814 	if (status & E1000_STATUS_PHYRA)
4815 		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
4816 	else
4817 		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
4818 
4819 	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
4820 	if (hw->mac.type <= e1000_ich9lan) {
4821 		if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
4822 		    (hw->phy.type == e1000_phy_igp_3)) {
4823 			e1000_phy_init_script_igp3(hw);
4824 		}
4825 	} else {
4826 		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
4827 			/* Maybe we should do a basic PHY config */
4828 			DEBUGOUT("EEPROM not present\n");
4829 			ret_val = -E1000_ERR_CONFIG;
4830 		}
4831 	}
4832 
4833 	return ret_val;
4834 }
4835 
4836 /**
4837  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
4838  * @hw: pointer to the HW structure
4839  *
4840  * In the case of a PHY power down to save power, to turn off link during a
4841  * driver unload, or when wake on LAN is not enabled, remove the link.
4842  **/
4843 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
4844 {
4845 	/* If the management interface is not enabled, then power down */
4846 	if (!(hw->mac.ops.check_mng_mode(hw) ||
4847 	      hw->phy.ops.check_reset_block(hw)))
4848 		e1000_power_down_phy_copper(hw);
4849 
4850 	return;
4851 }
4852 
4853 /**
4854  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
4855  *  @hw: pointer to the HW structure
4856  *
4857  *  Clears hardware counters specific to the silicon family and calls
4858  *  clear_hw_cntrs_generic to clear all general purpose counters.
4859  **/
4860 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
4861 {
4862 	u16 phy_data;
4863 	s32 ret_val;
4864 
4865 	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
4866 
4867 	e1000_clear_hw_cntrs_base_generic(hw);
4868 
4869 	E1000_READ_REG(hw, E1000_ALGNERRC);
4870 	E1000_READ_REG(hw, E1000_RXERRC);
4871 	E1000_READ_REG(hw, E1000_TNCRS);
4872 	E1000_READ_REG(hw, E1000_CEXTERR);
4873 	E1000_READ_REG(hw, E1000_TSCTC);
4874 	E1000_READ_REG(hw, E1000_TSCTFC);
4875 
4876 	E1000_READ_REG(hw, E1000_MGTPRC);
4877 	E1000_READ_REG(hw, E1000_MGTPDC);
4878 	E1000_READ_REG(hw, E1000_MGTPTC);
4879 
4880 	E1000_READ_REG(hw, E1000_IAC);
4881 	E1000_READ_REG(hw, E1000_ICRXOC);
4882 
4883 	/* Clear PHY statistics registers */
4884 	if ((hw->phy.type == e1000_phy_82578) ||
4885 	    (hw->phy.type == e1000_phy_82579) ||
4886 	    (hw->phy.type == e1000_phy_i217) ||
4887 	    (hw->phy.type == e1000_phy_82577)) {
4888 		ret_val = hw->phy.ops.acquire(hw);
4889 		if (ret_val)
4890 			return;
4891 		ret_val = hw->phy.ops.set_page(hw,
4892 					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
4893 		if (ret_val)
4894 			goto release;
4895 		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4896 		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
4897 		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4898 		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
4899 		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4900 		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
4901 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4902 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
4903 		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4904 		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
4905 		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4906 		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
4907 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4908 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
4909 release:
4910 		hw->phy.ops.release(hw);
4911 	}
4912 }
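
/*
 * Illustrative note: the MAC statistics registers above are clear-on-read,
 * so a bare E1000_READ_REG(hw, E1000_ALGNERRC) is itself the clearing
 * operation; the PHY statistics additionally require the HV_STATS_PAGE
 * select and the paged upper/lower reads shown.
 */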
4913 
4914