xref: /dragonfly/sys/dev/netif/ig_hal/e1000_ich8lan.c (revision cecb9aae)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2011, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD:$*/
34 
35 /*
36  * 82562G 10/100 Network Connection
37  * 82562G-2 10/100 Network Connection
38  * 82562GT 10/100 Network Connection
39  * 82562GT-2 10/100 Network Connection
40  * 82562V 10/100 Network Connection
41  * 82562V-2 10/100 Network Connection
42  * 82566DC-2 Gigabit Network Connection
43  * 82566DC Gigabit Network Connection
44  * 82566DM-2 Gigabit Network Connection
45  * 82566DM Gigabit Network Connection
46  * 82566MC Gigabit Network Connection
47  * 82566MM Gigabit Network Connection
48  * 82567LM Gigabit Network Connection
49  * 82567LF Gigabit Network Connection
50  * 82567V Gigabit Network Connection
51  * 82567LM-2 Gigabit Network Connection
52  * 82567LF-2 Gigabit Network Connection
53  * 82567V-2 Gigabit Network Connection
54  * 82567LF-3 Gigabit Network Connection
55  * 82567LM-3 Gigabit Network Connection
56  * 82567LM-4 Gigabit Network Connection
57  * 82577LM Gigabit Network Connection
58  * 82577LC Gigabit Network Connection
59  * 82578DM Gigabit Network Connection
60  * 82578DC Gigabit Network Connection
61  * 82579LM Gigabit Network Connection
62  * 82579V Gigabit Network Connection
63  */
64 
65 #include "e1000_api.h"
66 
67 static s32  e1000_init_phy_params_ich8lan(struct e1000_hw *hw);
68 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw);
69 static s32  e1000_init_nvm_params_ich8lan(struct e1000_hw *hw);
70 static s32  e1000_init_mac_params_ich8lan(struct e1000_hw *hw);
71 static s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
72 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
73 static s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
74 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
75 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
76 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
77 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
78 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
79 					      u8 *mc_addr_list,
80 					      u32 mc_addr_count);
81 static s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
82 static s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
83 static s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
84 static s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
85 					    bool active);
86 static s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
87 					    bool active);
88 static s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
89 				   u16 words, u16 *data);
90 static s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
91 				    u16 words, u16 *data);
92 static s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
93 static s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
94 static s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
95 					    u16 *data);
96 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
97 static s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
98 static s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
99 static s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
100 static s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
101 static s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
102 static s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
103 					   u16 *speed, u16 *duplex);
104 static s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
105 static s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
106 static s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
107 static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
108 static s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
109 static s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
110 static s32  e1000_led_on_pchlan(struct e1000_hw *hw);
111 static s32  e1000_led_off_pchlan(struct e1000_hw *hw);
112 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
113 static s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
114 static s32  e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout);
115 static s32  e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw);
116 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
117 static s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
118 static s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
119 					  u32 offset, u8 *data);
120 static s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
121 					  u8 size, u16 *data);
122 static s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
123 					  u32 offset, u16 *data);
124 static s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
125 						 u32 offset, u8 byte);
126 static s32  e1000_write_flash_byte_ich8lan(struct e1000_hw *hw,
127 					   u32 offset, u8 data);
128 static s32  e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
129 					   u8 size, u16 data);
130 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
131 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
132 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
133 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
134 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
135 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
136 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
137 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
138 
139 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
140 /* Offset 04h HSFSTS */
141 union ich8_hws_flash_status {
142 	struct ich8_hsfsts {
143 		u16 flcdone:1; /* bit 0 Flash Cycle Done */
144 		u16 flcerr:1; /* bit 1 Flash Cycle Error */
145 		u16 dael:1; /* bit 2 Direct Access error Log */
146 		u16 berasesz:2; /* bit 4:3 Sector Erase Size */
147 		u16 flcinprog:1; /* bit 5 flash cycle in Progress */
148 		u16 reserved1:2; /* bit 13:6 Reserved */
149 		u16 reserved2:6; /* bit 13:6 Reserved */
150 		u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
151 		u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
152 	} hsf_status;
153 	u16 regval;
154 };
155 
156 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
157 /* Offset 06h FLCTL */
158 union ich8_hws_flash_ctrl {
159 	struct ich8_hsflctl {
160 		u16 flcgo:1;   /* 0 Flash Cycle Go */
161 		u16 flcycle:2;   /* 2:1 Flash Cycle */
162 		u16 reserved:5;   /* 7:3 Reserved  */
163 		u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
164 		u16 flockdn:6;   /* 15:10 Reserved */
165 	} hsf_ctrl;
166 	u16 regval;
167 };
168 
169 /* ICH Flash Region Access Permissions */
170 union ich8_hws_flash_regacc {
171 	struct ich8_flracc {
172 		u32 grra:8; /* 0:7 GbE region Read Access */
173 		u32 grwa:8; /* 8:15 GbE region Write Access */
174 		u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
175 		u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
176 	} hsf_flregacc;
177 	u16 regval;
178 };
179 
180 static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw)
181 {
182 	u32 reg;
183 
184 	DEBUGFUNC("e1000_toggle_lanphypc_value_ich8lan");
185 
186 	/* Set Phy Config Counter to 50msec */
187 	reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
188 	reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
189 	reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
190 	E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
191 
192 	/* Toggle LANPHYPC Value bit */
193 	reg = E1000_READ_REG(hw, E1000_CTRL);
194 	reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
195 	reg &= ~E1000_CTRL_LANPHYPC_VALUE;
196 	E1000_WRITE_REG(hw, E1000_CTRL, reg);
197 	E1000_WRITE_FLUSH(hw);
198 	usec_delay(10);
199 	reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
200 	E1000_WRITE_REG(hw, E1000_CTRL, reg);
201 }
202 
203 /**
204  *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
205  *  @hw: pointer to the HW structure
206  *
207  *  Initialize family-specific PHY parameters and function pointers.
208  **/
209 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
210 {
211 	struct e1000_phy_info *phy = &hw->phy;
212 	s32 ret_val = E1000_SUCCESS;
213 
214 	DEBUGFUNC("e1000_init_phy_params_pchlan");
215 
216 	phy->addr		= 1;
217 	phy->reset_delay_us	= 100;
218 
219 	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
220 	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
221 	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
222 	phy->ops.set_page	= e1000_set_page_igp;
223 	phy->ops.read_reg	= e1000_read_phy_reg_hv;
224 	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
225 	phy->ops.read_reg_page	= e1000_read_phy_reg_page_hv;
226 	phy->ops.release	= e1000_release_swflag_ich8lan;
227 	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
228 	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
229 	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
230 	phy->ops.write_reg	= e1000_write_phy_reg_hv;
231 	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
232 	phy->ops.write_reg_page	= e1000_write_phy_reg_page_hv;
233 	phy->ops.power_up	= e1000_power_up_phy_copper;
234 	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;
235 	phy->autoneg_mask	= AUTONEG_ADVERTISE_SPEED_DEFAULT;
236 
237 	if (!hw->phy.ops.check_reset_block(hw)) {
238 		u32 fwsm = E1000_READ_REG(hw, E1000_FWSM);
239 
240 		/*
241 		 * The MAC-PHY interconnect may still be in SMBus mode after
242 		 * Sx->S0.  If resetting the PHY is not blocked, toggle the
243 		 * LANPHYPC Value bit to force the interconnect to PCIe mode.
244 		 */
245 		e1000_toggle_lanphypc_value_ich8lan(hw);
246 		msec_delay(50);
247 
248 		/*
249 		 * Gate automatic PHY configuration by hardware on
250 		 * non-managed 82579
251 		 */
252 		if ((hw->mac.type == e1000_pch2lan) &&
253 		    !(fwsm & E1000_ICH_FWSM_FW_VALID))
254 			e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
255 
256 		/*
257 		 * Reset the PHY before any access to it.  Doing so, ensures
258 		 * that the PHY is in a known good state before we read/write
259 		 * PHY registers.  The generic reset is sufficient here,
260 		 * because we haven't determined the PHY type yet.
261 		 */
262 		ret_val = e1000_phy_hw_reset_generic(hw);
263 		if (ret_val)
264 			return ret_val;
265 
266 		/* Ungate automatic PHY configuration on non-managed 82579 */
267 		if ((hw->mac.type == e1000_pch2lan) &&
268 		    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
269 			msec_delay(10);
270 			e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
271 		}
272 	}
273 
274 	phy->id = e1000_phy_unknown;
275 	switch (hw->mac.type) {
276 	default:
277 		ret_val = e1000_get_phy_id(hw);
278 		if (ret_val)
279 			return ret_val;
280 		if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
281 			break;
282 		/* fall-through */
283 	case e1000_pch2lan:
284 		/*
285 		 * In case the PHY needs to be in mdio slow mode,
286 		 * set slow mode and try to get the PHY id again.
287 		 */
288 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
289 		if (ret_val)
290 			return ret_val;
291 		ret_val = e1000_get_phy_id(hw);
292 		if (ret_val)
293 			return ret_val;
294 		break;
295 	}
296 	phy->type = e1000_get_phy_type_from_id(phy->id);
297 
298 	switch (phy->type) {
299 	case e1000_phy_82577:
300 	case e1000_phy_82579:
301 		phy->ops.check_polarity = e1000_check_polarity_82577;
302 		phy->ops.force_speed_duplex =
303 			e1000_phy_force_speed_duplex_82577;
304 		phy->ops.get_cable_length = e1000_get_cable_length_82577;
305 		phy->ops.get_info = e1000_get_phy_info_82577;
306 		phy->ops.commit = e1000_phy_sw_reset_generic;
307 		break;
308 	case e1000_phy_82578:
309 		phy->ops.check_polarity = e1000_check_polarity_m88;
310 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
311 		phy->ops.get_cable_length = e1000_get_cable_length_m88;
312 		phy->ops.get_info = e1000_get_phy_info_m88;
313 		break;
314 	default:
315 		ret_val = -E1000_ERR_PHY;
316 		break;
317 	}
318 
319 	return ret_val;
320 }
321 
322 /**
323  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
324  *  @hw: pointer to the HW structure
325  *
326  *  Initialize family-specific PHY parameters and function pointers.
327  **/
328 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
329 {
330 	struct e1000_phy_info *phy = &hw->phy;
331 	s32 ret_val;
332 	u16 i = 0;
333 
334 	DEBUGFUNC("e1000_init_phy_params_ich8lan");
335 
336 	phy->addr		= 1;
337 	phy->reset_delay_us	= 100;
338 
339 	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
340 	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
341 	phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
342 	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
343 	phy->ops.read_reg	= e1000_read_phy_reg_igp;
344 	phy->ops.release	= e1000_release_swflag_ich8lan;
345 	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
346 	phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
347 	phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
348 	phy->ops.write_reg	= e1000_write_phy_reg_igp;
349 	phy->ops.power_up	= e1000_power_up_phy_copper;
350 	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;
351 
352 	/*
353 	 * We may need to do this twice - once for IGP and if that fails,
354 	 * we'll set BM func pointers and try again
355 	 */
356 	ret_val = e1000_determine_phy_address(hw);
357 	if (ret_val) {
358 		phy->ops.write_reg = e1000_write_phy_reg_bm;
359 		phy->ops.read_reg  = e1000_read_phy_reg_bm;
360 		ret_val = e1000_determine_phy_address(hw);
361 		if (ret_val) {
362 			DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
363 			return ret_val;
364 		}
365 	}
366 
367 	phy->id = 0;
368 	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
369 	       (i++ < 100)) {
370 		msec_delay(1);
371 		ret_val = e1000_get_phy_id(hw);
372 		if (ret_val)
373 			return ret_val;
374 	}
375 
376 	/* Verify phy id */
377 	switch (phy->id) {
378 	case IGP03E1000_E_PHY_ID:
379 		phy->type = e1000_phy_igp_3;
380 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
381 		phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
382 		phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
383 		phy->ops.get_info = e1000_get_phy_info_igp;
384 		phy->ops.check_polarity = e1000_check_polarity_igp;
385 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
386 		break;
387 	case IFE_E_PHY_ID:
388 	case IFE_PLUS_E_PHY_ID:
389 	case IFE_C_E_PHY_ID:
390 		phy->type = e1000_phy_ife;
391 		phy->autoneg_mask = E1000_ALL_NOT_GIG;
392 		phy->ops.get_info = e1000_get_phy_info_ife;
393 		phy->ops.check_polarity = e1000_check_polarity_ife;
394 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
395 		break;
396 	case BME1000_E_PHY_ID:
397 		phy->type = e1000_phy_bm;
398 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
399 		phy->ops.read_reg = e1000_read_phy_reg_bm;
400 		phy->ops.write_reg = e1000_write_phy_reg_bm;
401 		phy->ops.commit = e1000_phy_sw_reset_generic;
402 		phy->ops.get_info = e1000_get_phy_info_m88;
403 		phy->ops.check_polarity = e1000_check_polarity_m88;
404 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
405 		break;
406 	default:
407 		return -E1000_ERR_PHY;
408 		break;
409 	}
410 
411 	return E1000_SUCCESS;
412 }
413 
414 /**
415  *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
416  *  @hw: pointer to the HW structure
417  *
418  *  Initialize family-specific NVM parameters and function
419  *  pointers.
420  **/
421 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
422 {
423 	struct e1000_nvm_info *nvm = &hw->nvm;
424 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
425 	u32 gfpreg, sector_base_addr, sector_end_addr;
426 	u16 i;
427 
428 	DEBUGFUNC("e1000_init_nvm_params_ich8lan");
429 
430 	/* Can't read flash registers if the register set isn't mapped. */
431 	if (!hw->flash_address) {
432 		DEBUGOUT("ERROR: Flash registers not mapped\n");
433 		return -E1000_ERR_CONFIG;
434 	}
435 
436 	nvm->type = e1000_nvm_flash_sw;
437 
438 	gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
439 
440 	/*
441 	 * sector_X_addr is a "sector"-aligned address (4096 bytes)
442 	 * Add 1 to sector_end_addr since this sector is included in
443 	 * the overall size.
444 	 */
445 	sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
446 	sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
447 
448 	/* flash_base_addr is byte-aligned */
449 	nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
450 
451 	/*
452 	 * find total size of the NVM, then cut in half since the total
453 	 * size represents two separate NVM banks.
454 	 */
455 	nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
456 				<< FLASH_SECTOR_ADDR_SHIFT;
457 	nvm->flash_bank_size /= 2;
458 	/* Adjust to word count */
459 	nvm->flash_bank_size /= sizeof(u16);
460 
461 	nvm->word_size = E1000_SHADOW_RAM_WORDS;
462 
463 	/* Clear shadow ram */
464 	for (i = 0; i < nvm->word_size; i++) {
465 		dev_spec->shadow_ram[i].modified = FALSE;
466 		dev_spec->shadow_ram[i].value    = 0xFFFF;
467 	}
468 
469 	/* Function Pointers */
470 	nvm->ops.acquire	= e1000_acquire_nvm_ich8lan;
471 	nvm->ops.release	= e1000_release_nvm_ich8lan;
472 	nvm->ops.read		= e1000_read_nvm_ich8lan;
473 	nvm->ops.update		= e1000_update_nvm_checksum_ich8lan;
474 	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
475 	nvm->ops.validate	= e1000_validate_nvm_checksum_ich8lan;
476 	nvm->ops.write		= e1000_write_nvm_ich8lan;
477 
478 	return E1000_SUCCESS;
479 }
480 
481 /**
482  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
483  *  @hw: pointer to the HW structure
484  *
485  *  Initialize family-specific MAC parameters and function
486  *  pointers.
487  **/
488 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
489 {
490 	struct e1000_mac_info *mac = &hw->mac;
491 
492 	DEBUGFUNC("e1000_init_mac_params_ich8lan");
493 
494 	/* Set media type function pointer */
495 	hw->phy.media_type = e1000_media_type_copper;
496 
497 	/* Set mta register count */
498 	mac->mta_reg_count = 32;
499 	/* Set rar entry count */
500 	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
501 	if (mac->type == e1000_ich8lan)
502 		mac->rar_entry_count--;
503 	/* Set if part includes ASF firmware */
504 	mac->asf_firmware_present = TRUE;
505 	/* FWSM register */
506 	mac->has_fwsm = TRUE;
507 	/* ARC subsystem not supported */
508 	mac->arc_subsystem_valid = FALSE;
509 	/* Adaptive IFS supported */
510 	mac->adaptive_ifs = TRUE;
511 
512 	/* Function pointers */
513 
514 	/* bus type/speed/width */
515 	mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
516 	/* function id */
517 	mac->ops.set_lan_id = e1000_set_lan_id_single_port;
518 	/* reset */
519 	mac->ops.reset_hw = e1000_reset_hw_ich8lan;
520 	/* hw initialization */
521 	mac->ops.init_hw = e1000_init_hw_ich8lan;
522 	/* link setup */
523 	mac->ops.setup_link = e1000_setup_link_ich8lan;
524 	/* physical interface setup */
525 	mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
526 	/* check for link */
527 	mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
528 	/* link info */
529 	mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
530 	/* multicast address update */
531 	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
532 	/* clear hardware counters */
533 	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
534 
535 	/* LED and other operations */
536 	switch (mac->type) {
537 	case e1000_ich8lan:
538 	case e1000_ich9lan:
539 	case e1000_ich10lan:
540 		/* check management mode */
541 		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
542 		/* ID LED init */
543 		mac->ops.id_led_init = e1000_id_led_init_generic;
544 		/* blink LED */
545 		mac->ops.blink_led = e1000_blink_led_generic;
546 		/* setup LED */
547 		mac->ops.setup_led = e1000_setup_led_generic;
548 		/* cleanup LED */
549 		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
550 		/* turn on/off LED */
551 		mac->ops.led_on = e1000_led_on_ich8lan;
552 		mac->ops.led_off = e1000_led_off_ich8lan;
553 		break;
554 	case e1000_pch2lan:
555 		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
556 		mac->ops.rar_set = e1000_rar_set_pch2lan;
557 		/* multicast address update for pch2 */
558 		mac->ops.update_mc_addr_list =
559 			e1000_update_mc_addr_list_pch2lan;
560 	case e1000_pchlan:
561 		/* check management mode */
562 		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
563 		/* ID LED init */
564 		mac->ops.id_led_init = e1000_id_led_init_pchlan;
565 		/* setup LED */
566 		mac->ops.setup_led = e1000_setup_led_pchlan;
567 		/* cleanup LED */
568 		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
569 		/* turn on/off LED */
570 		mac->ops.led_on = e1000_led_on_pchlan;
571 		mac->ops.led_off = e1000_led_off_pchlan;
572 		break;
573 	default:
574 		break;
575 	}
576 
577 	/* Enable PCS Lock-loss workaround for ICH8 */
578 	if (mac->type == e1000_ich8lan)
579 		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
580 
581 	/* Gate automatic PHY configuration by hardware on managed 82579 */
582 	if ((mac->type == e1000_pch2lan) &&
583 	    (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
584 		e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
585 
586 	return E1000_SUCCESS;
587 }
588 
589 /**
590  *  e1000_set_eee_pchlan - Enable/disable EEE support
591  *  @hw: pointer to the HW structure
592  *
593  *  Enable/disable EEE based on setting in dev_spec structure.  The bits in
594  *  the LPI Control register will remain set only if/when link is up.
595  **/
596 static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
597 {
598 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
599 	s32 ret_val = E1000_SUCCESS;
600 	u16 phy_reg;
601 
602 	DEBUGFUNC("e1000_set_eee_pchlan");
603 
604 	if (hw->phy.type != e1000_phy_82579)
605 		return E1000_SUCCESS;
606 
607 	ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
608 	if (ret_val)
609 		return ret_val;
610 
611 	if (dev_spec->eee_disable)
612 		phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
613 	else
614 		phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
615 
616 	return hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
617 }
618 
619 /**
620  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
621  *  @hw: pointer to the HW structure
622  *
623  *  Checks to see of the link status of the hardware has changed.  If a
624  *  change in link status has been detected, then we read the PHY registers
625  *  to get the current speed/duplex if link exists.
626  **/
627 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
628 {
629 	struct e1000_mac_info *mac = &hw->mac;
630 	s32 ret_val;
631 	bool link;
632 	u16 phy_reg;
633 
634 	DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
635 
636 	/*
637 	 * We only want to go out to the PHY registers to see if Auto-Neg
638 	 * has completed and/or if our link status has changed.  The
639 	 * get_link_status flag is set upon receiving a Link Status
640 	 * Change or Rx Sequence Error interrupt.
641 	 */
642 	if (!mac->get_link_status)
643 		return E1000_SUCCESS;
644 
645 	/*
646 	 * First we want to see if the MII Status Register reports
647 	 * link.  If so, then we want to get the current speed/duplex
648 	 * of the PHY.
649 	 */
650 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
651 	if (ret_val)
652 		return ret_val;
653 
654 	if (hw->mac.type == e1000_pchlan) {
655 		ret_val = e1000_k1_gig_workaround_hv(hw, link);
656 		if (ret_val)
657 			return ret_val;
658 	}
659 
660 	if (!link)
661 		return E1000_SUCCESS; /* No link detected */
662 
663 	mac->get_link_status = FALSE;
664 
665 	switch (hw->mac.type) {
666 	case e1000_pch2lan:
667 		ret_val = e1000_k1_workaround_lv(hw);
668 		if (ret_val)
669 			return ret_val;
670 		/* fall-thru */
671 	case e1000_pchlan:
672 		if (hw->phy.type == e1000_phy_82578) {
673 			ret_val = e1000_link_stall_workaround_hv(hw);
674 			if (ret_val)
675 				return ret_val;
676 		}
677 
678 		/*
679 		 * Workaround for PCHx parts in half-duplex:
680 		 * Set the number of preambles removed from the packet
681 		 * when it is passed from the PHY to the MAC to prevent
682 		 * the MAC from misinterpreting the packet type.
683 		 */
684 		hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
685 		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
686 
687 		if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
688 		    E1000_STATUS_FD)
689 			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
690 
691 		hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
692 		break;
693 	default:
694 		break;
695 	}
696 
697 	/*
698 	 * Check if there was DownShift, must be checked
699 	 * immediately after link-up
700 	 */
701 	e1000_check_downshift_generic(hw);
702 
703 	/* Enable/Disable EEE after link up */
704 	ret_val = e1000_set_eee_pchlan(hw);
705 	if (ret_val)
706 		return ret_val;
707 
708 	/*
709 	 * If we are forcing speed/duplex, then we simply return since
710 	 * we have already determined whether we have link or not.
711 	 */
712 	if (!mac->autoneg)
713 		return -E1000_ERR_CONFIG;
714 
715 	/*
716 	 * Auto-Neg is enabled.  Auto Speed Detection takes care
717 	 * of MAC speed/duplex configuration.  So we only need to
718 	 * configure Collision Distance in the MAC.
719 	 */
720 	mac->ops.config_collision_dist(hw);
721 
722 	/*
723 	 * Configure Flow Control now that Auto-Neg has completed.
724 	 * First, we need to restore the desired flow control
725 	 * settings because we may have had to re-autoneg with a
726 	 * different link partner.
727 	 */
728 	ret_val = e1000_config_fc_after_link_up_generic(hw);
729 	if (ret_val)
730 		DEBUGOUT("Error configuring flow control\n");
731 
732 	return ret_val;
733 }
734 
735 /**
736  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
737  *  @hw: pointer to the HW structure
738  *
739  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
740  **/
741 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
742 {
743 	DEBUGFUNC("e1000_init_function_pointers_ich8lan");
744 
745 	hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
746 	hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
747 	switch (hw->mac.type) {
748 	case e1000_ich8lan:
749 	case e1000_ich9lan:
750 	case e1000_ich10lan:
751 		hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
752 		break;
753 	case e1000_pchlan:
754 	case e1000_pch2lan:
755 		hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
756 		break;
757 	default:
758 		break;
759 	}
760 }
761 
762 /**
763  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
764  *  @hw: pointer to the HW structure
765  *
766  *  Acquires the mutex for performing NVM operations.
767  **/
768 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
769 {
770 	DEBUGFUNC("e1000_acquire_nvm_ich8lan");
771 
772 	return E1000_SUCCESS;
773 }
774 
775 /**
776  *  e1000_release_nvm_ich8lan - Release NVM mutex
777  *  @hw: pointer to the HW structure
778  *
779  *  Releases the mutex used while performing NVM operations.
780  **/
781 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
782 {
783 	DEBUGFUNC("e1000_release_nvm_ich8lan");
784 
785 	return;
786 }
787 
788 /**
789  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
790  *  @hw: pointer to the HW structure
791  *
792  *  Acquires the software control flag for performing PHY and select
793  *  MAC CSR accesses.
794  **/
795 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
796 {
797 	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
798 	s32 ret_val = E1000_SUCCESS;
799 
800 	DEBUGFUNC("e1000_acquire_swflag_ich8lan");
801 
802 	while (timeout) {
803 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
804 		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
805 			break;
806 
807 		msec_delay_irq(1);
808 		timeout--;
809 	}
810 
811 	if (!timeout) {
812 		DEBUGOUT("SW has already locked the resource.\n");
813 		ret_val = -E1000_ERR_CONFIG;
814 		goto out;
815 	}
816 
817 	timeout = SW_FLAG_TIMEOUT;
818 
819 	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
820 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
821 
822 	while (timeout) {
823 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
824 		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
825 			break;
826 
827 		msec_delay_irq(1);
828 		timeout--;
829 	}
830 
831 	if (!timeout) {
832 		DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
833 			  E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
834 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
835 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
836 		ret_val = -E1000_ERR_CONFIG;
837 		goto out;
838 	}
839 
840 out:
841 	return ret_val;
842 }
843 
844 /**
845  *  e1000_release_swflag_ich8lan - Release software control flag
846  *  @hw: pointer to the HW structure
847  *
848  *  Releases the software control flag for performing PHY and select
849  *  MAC CSR accesses.
850  **/
851 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
852 {
853 	u32 extcnf_ctrl;
854 
855 	DEBUGFUNC("e1000_release_swflag_ich8lan");
856 
857 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
858 
859 	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
860 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
861 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
862 	} else {
863 		DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
864 	}
865 
866 	return;
867 }
868 
869 /**
870  *  e1000_check_mng_mode_ich8lan - Checks management mode
871  *  @hw: pointer to the HW structure
872  *
873  *  This checks if the adapter has any manageability enabled.
874  *  This is a function pointer entry point only called by read/write
875  *  routines for the PHY and NVM parts.
876  **/
877 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
878 {
879 	u32 fwsm;
880 
881 	DEBUGFUNC("e1000_check_mng_mode_ich8lan");
882 
883 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
884 
885 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
886 	       ((fwsm & E1000_FWSM_MODE_MASK) ==
887 		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
888 }
889 
890 /**
891  *  e1000_check_mng_mode_pchlan - Checks management mode
892  *  @hw: pointer to the HW structure
893  *
894  *  This checks if the adapter has iAMT enabled.
895  *  This is a function pointer entry point only called by read/write
896  *  routines for the PHY and NVM parts.
897  **/
898 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
899 {
900 	u32 fwsm;
901 
902 	DEBUGFUNC("e1000_check_mng_mode_pchlan");
903 
904 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
905 
906 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
907 	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
908 }
909 
910 /**
911  *  e1000_rar_set_pch2lan - Set receive address register
912  *  @hw: pointer to the HW structure
913  *  @addr: pointer to the receive address
914  *  @index: receive address array register
915  *
916  *  Sets the receive address array register at index to the address passed
917  *  in by addr.  For 82579, RAR[0] is the base address register that is to
918  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
919  *  Use SHRA[0-3] in place of those reserved for ME.
920  **/
921 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
922 {
923 	u32 rar_low, rar_high;
924 
925 	DEBUGFUNC("e1000_rar_set_pch2lan");
926 
927 	/*
928 	 * HW expects these in little endian so we reverse the byte order
929 	 * from network order (big endian) to little endian
930 	 */
931 	rar_low = ((u32) addr[0] |
932 		   ((u32) addr[1] << 8) |
933 		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
934 
935 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
936 
937 	/* If MAC address zero, no need to set the AV bit */
938 	if (rar_low || rar_high)
939 		rar_high |= E1000_RAH_AV;
940 
941 	if (index == 0) {
942 		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
943 		E1000_WRITE_FLUSH(hw);
944 		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
945 		E1000_WRITE_FLUSH(hw);
946 		return;
947 	}
948 
949 	if (index < hw->mac.rar_entry_count) {
950 		s32 ret_val;
951 
952 		ret_val = e1000_acquire_swflag_ich8lan(hw);
953 		if (ret_val)
954 			goto out;
955 
956 		E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
957 		E1000_WRITE_FLUSH(hw);
958 		E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
959 		E1000_WRITE_FLUSH(hw);
960 
961 		e1000_release_swflag_ich8lan(hw);
962 
963 		/* verify the register updates */
964 		if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
965 		    (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
966 			return;
967 
968 		DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
969 			 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
970 	}
971 
972 out:
973 	DEBUGOUT1("Failed to write receive address at index %d\n", index);
974 }
975 
976 /**
977  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
978  *  @hw: pointer to the HW structure
979  *  @mc_addr_list: array of multicast addresses to program
980  *  @mc_addr_count: number of multicast addresses to program
981  *
982  *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
983  *  The caller must have a packed mc_addr_list of multicast addresses.
984  **/
985 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
986 					      u8 *mc_addr_list,
987 					      u32 mc_addr_count)
988 {
989 	u16 phy_reg = 0;
990 	int i;
991 	s32 ret_val;
992 
993 	DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
994 
995 	e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
996 
997 	ret_val = hw->phy.ops.acquire(hw);
998 	if (ret_val)
999 		return;
1000 
1001 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1002 	if (ret_val)
1003 		goto release;
1004 
1005 	for (i = 0; i < hw->mac.mta_reg_count; i++) {
1006 		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
1007 					   (u16)(hw->mac.mta_shadow[i] &
1008 						 0xFFFF));
1009 		hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
1010 					   (u16)((hw->mac.mta_shadow[i] >> 16) &
1011 						 0xFFFF));
1012 	}
1013 
1014 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1015 
1016 release:
1017 	hw->phy.ops.release(hw);
1018 }
1019 
1020 /**
1021  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1022  *  @hw: pointer to the HW structure
1023  *
1024  *  Checks if firmware is blocking the reset of the PHY.
1025  *  This is a function pointer entry point only called by
1026  *  reset routines.
1027  **/
1028 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1029 {
1030 	u32 fwsm;
1031 
1032 	DEBUGFUNC("e1000_check_reset_block_ich8lan");
1033 
1034 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
1035 
1036 	return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
1037 						: E1000_BLK_PHY_RESET;
1038 }
1039 
1040 /**
1041  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
1042  *  @hw: pointer to the HW structure
1043  *
1044  *  Assumes semaphore already acquired.
1045  *
1046  **/
1047 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
1048 {
1049 	u16 phy_data;
1050 	u32 strap = E1000_READ_REG(hw, E1000_STRAP);
1051 	s32 ret_val = E1000_SUCCESS;
1052 
1053 	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
1054 
1055 	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
1056 	if (ret_val)
1057 		return ret_val;
1058 
1059 	phy_data &= ~HV_SMB_ADDR_MASK;
1060 	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1061 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1062 
1063 	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
1064 }
1065 
1066 /**
1067  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
1068  *  @hw:   pointer to the HW structure
1069  *
1070  *  SW should configure the LCD from the NVM extended configuration region
1071  *  as a workaround for certain parts.
1072  **/
1073 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1074 {
1075 	struct e1000_phy_info *phy = &hw->phy;
1076 	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
1077 	s32 ret_val = E1000_SUCCESS;
1078 	u16 word_addr, reg_data, reg_addr, phy_page = 0;
1079 
1080 	DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
1081 
1082 	/*
1083 	 * Initialize the PHY from the NVM on ICH platforms.  This
1084 	 * is needed due to an issue where the NVM configuration is
1085 	 * not properly autoloaded after power transitions.
1086 	 * Therefore, after each PHY reset, we will load the
1087 	 * configuration data out of the NVM manually.
1088 	 */
1089 	switch (hw->mac.type) {
1090 	case e1000_ich8lan:
1091 		if (phy->type != e1000_phy_igp_3)
1092 			return ret_val;
1093 
1094 		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
1095 		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
1096 			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
1097 			break;
1098 		}
1099 		/* Fall-thru */
1100 	case e1000_pchlan:
1101 	case e1000_pch2lan:
1102 		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
1103 		break;
1104 	default:
1105 		return ret_val;
1106 	}
1107 
1108 	ret_val = hw->phy.ops.acquire(hw);
1109 	if (ret_val)
1110 		return ret_val;
1111 
1112 	data = E1000_READ_REG(hw, E1000_FEXTNVM);
1113 	if (!(data & sw_cfg_mask))
1114 		goto release;
1115 
1116 	/*
1117 	 * Make sure HW does not configure LCD from PHY
1118 	 * extended configuration before SW configuration
1119 	 */
1120 	data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1121 	if ((hw->mac.type < e1000_pch2lan) &&
1122 	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
1123 			goto release;
1124 
1125 	cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
1126 	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
1127 	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
1128 	if (!cnf_size)
1129 		goto release;
1130 
1131 	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
1132 	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
1133 
1134 	if (((hw->mac.type == e1000_pchlan) &&
1135 	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
1136 	    (hw->mac.type > e1000_pchlan)) {
1137 		/*
1138 		 * HW configures the SMBus address and LEDs when the
1139 		 * OEM and LCD Write Enable bits are set in the NVM.
1140 		 * When both NVM bits are cleared, SW will configure
1141 		 * them instead.
1142 		 */
1143 		ret_val = e1000_write_smbus_addr(hw);
1144 		if (ret_val)
1145 			goto release;
1146 
1147 		data = E1000_READ_REG(hw, E1000_LEDCTL);
1148 		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
1149 							(u16)data);
1150 		if (ret_val)
1151 			goto release;
1152 	}
1153 
1154 	/* Configure LCD from extended configuration region. */
1155 
1156 	/* cnf_base_addr is in DWORD */
1157 	word_addr = (u16)(cnf_base_addr << 1);
1158 
1159 	for (i = 0; i < cnf_size; i++) {
1160 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
1161 					   &reg_data);
1162 		if (ret_val)
1163 			goto release;
1164 
1165 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
1166 					   1, &reg_addr);
1167 		if (ret_val)
1168 			goto release;
1169 
1170 		/* Save off the PHY page for future writes. */
1171 		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
1172 			phy_page = reg_data;
1173 			continue;
1174 		}
1175 
1176 		reg_addr &= PHY_REG_MASK;
1177 		reg_addr |= phy_page;
1178 
1179 		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
1180 						    reg_data);
1181 		if (ret_val)
1182 			goto release;
1183 	}
1184 
1185 release:
1186 	hw->phy.ops.release(hw);
1187 	return ret_val;
1188 }
1189 
1190 /**
1191  *  e1000_k1_gig_workaround_hv - K1 Si workaround
1192  *  @hw:   pointer to the HW structure
1193  *  @link: link up bool flag
1194  *
1195  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
1196  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
1197  *  If link is down, the function will restore the default K1 setting located
1198  *  in the NVM.
1199  **/
1200 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1201 {
1202 	s32 ret_val = E1000_SUCCESS;
1203 	u16 status_reg = 0;
1204 	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
1205 
1206 	DEBUGFUNC("e1000_k1_gig_workaround_hv");
1207 
1208 	if (hw->mac.type != e1000_pchlan)
1209 		return E1000_SUCCESS;
1210 
1211 	/* Wrap the whole flow with the sw flag */
1212 	ret_val = hw->phy.ops.acquire(hw);
1213 	if (ret_val)
1214 		return ret_val;
1215 
1216 	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
1217 	if (link) {
1218 		if (hw->phy.type == e1000_phy_82578) {
1219 			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
1220 							      &status_reg);
1221 			if (ret_val)
1222 				goto release;
1223 
1224 			status_reg &= BM_CS_STATUS_LINK_UP |
1225 				      BM_CS_STATUS_RESOLVED |
1226 				      BM_CS_STATUS_SPEED_MASK;
1227 
1228 			if (status_reg == (BM_CS_STATUS_LINK_UP |
1229 					   BM_CS_STATUS_RESOLVED |
1230 					   BM_CS_STATUS_SPEED_1000))
1231 				k1_enable = FALSE;
1232 		}
1233 
1234 		if (hw->phy.type == e1000_phy_82577) {
1235 			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
1236 							      &status_reg);
1237 			if (ret_val)
1238 				goto release;
1239 
1240 			status_reg &= HV_M_STATUS_LINK_UP |
1241 				      HV_M_STATUS_AUTONEG_COMPLETE |
1242 				      HV_M_STATUS_SPEED_MASK;
1243 
1244 			if (status_reg == (HV_M_STATUS_LINK_UP |
1245 					   HV_M_STATUS_AUTONEG_COMPLETE |
1246 					   HV_M_STATUS_SPEED_1000))
1247 				k1_enable = FALSE;
1248 		}
1249 
1250 		/* Link stall fix for link up */
1251 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1252 						       0x0100);
1253 		if (ret_val)
1254 			goto release;
1255 
1256 	} else {
1257 		/* Link stall fix for link down */
1258 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1259 						       0x4100);
1260 		if (ret_val)
1261 			goto release;
1262 	}
1263 
1264 	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
1265 
1266 release:
1267 	hw->phy.ops.release(hw);
1268 
1269 	return ret_val;
1270 }
1271 
1272 /**
1273  *  e1000_configure_k1_ich8lan - Configure K1 power state
1274  *  @hw: pointer to the HW structure
1275  *  @enable: K1 state to configure
1276  *
1277  *  Configure the K1 power state based on the provided parameter.
1278  *  Assumes semaphore already acquired.
1279  *
1280  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1281  **/
1282 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1283 {
1284 	s32 ret_val = E1000_SUCCESS;
1285 	u32 ctrl_reg = 0;
1286 	u32 ctrl_ext = 0;
1287 	u32 reg = 0;
1288 	u16 kmrn_reg = 0;
1289 
1290 	DEBUGFUNC("e1000_configure_k1_ich8lan");
1291 
1292 	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1293 					     &kmrn_reg);
1294 	if (ret_val)
1295 		return ret_val;
1296 
1297 	if (k1_enable)
1298 		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
1299 	else
1300 		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
1301 
1302 	ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1303 					      kmrn_reg);
1304 	if (ret_val)
1305 		return ret_val;
1306 
1307 	usec_delay(20);
1308 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1309 	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1310 
1311 	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1312 	reg |= E1000_CTRL_FRCSPD;
1313 	E1000_WRITE_REG(hw, E1000_CTRL, reg);
1314 
1315 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
1316 	E1000_WRITE_FLUSH(hw);
1317 	usec_delay(20);
1318 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1319 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1320 	E1000_WRITE_FLUSH(hw);
1321 	usec_delay(20);
1322 
1323 	return E1000_SUCCESS;
1324 }
1325 
1326 /**
1327  *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
1328  *  @hw:       pointer to the HW structure
1329  *  @d0_state: boolean if entering d0 or d3 device state
1330  *
1331  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
1332  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
1333  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
1334  **/
1335 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1336 {
1337 	s32 ret_val = 0;
1338 	u32 mac_reg;
1339 	u16 oem_reg;
1340 
1341 	DEBUGFUNC("e1000_oem_bits_config_ich8lan");
1342 
1343 	if (hw->mac.type < e1000_pchlan)
1344 		return ret_val;
1345 
1346 	ret_val = hw->phy.ops.acquire(hw);
1347 	if (ret_val)
1348 		return ret_val;
1349 
1350 	if (hw->mac.type == e1000_pchlan) {
1351 		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1352 		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1353 			goto release;
1354 	}
1355 
1356 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
1357 	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
1358 		goto release;
1359 
1360 	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
1361 
1362 	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
1363 	if (ret_val)
1364 		goto release;
1365 
1366 	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
1367 
1368 	if (d0_state) {
1369 		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
1370 			oem_reg |= HV_OEM_BITS_GBE_DIS;
1371 
1372 		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1373 			oem_reg |= HV_OEM_BITS_LPLU;
1374 	} else {
1375 		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
1376 		    E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
1377 			oem_reg |= HV_OEM_BITS_GBE_DIS;
1378 
1379 		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
1380 		    E1000_PHY_CTRL_NOND0A_LPLU))
1381 			oem_reg |= HV_OEM_BITS_LPLU;
1382 	}
1383 
1384 	/* Set Restart auto-neg to activate the bits */
1385 	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
1386 	    !hw->phy.ops.check_reset_block(hw))
1387 		oem_reg |= HV_OEM_BITS_RESTART_AN;
1388 
1389 	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
1390 
1391 release:
1392 	hw->phy.ops.release(hw);
1393 
1394 	return ret_val;
1395 }
1396 
1397 
1398 /**
1399  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
1400  *  @hw:   pointer to the HW structure
1401  **/
1402 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
1403 {
1404 	s32 ret_val;
1405 	u16 data;
1406 
1407 	DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
1408 
1409 	ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
1410 	if (ret_val)
1411 		return ret_val;
1412 
1413 	data |= HV_KMRN_MDIO_SLOW;
1414 
1415 	ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
1416 
1417 	return ret_val;
1418 }
1419 
1420 /**
1421  *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1422  *  done after every PHY reset.
1423  **/
1424 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1425 {
1426 	s32 ret_val = E1000_SUCCESS;
1427 	u16 phy_data;
1428 
1429 	DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
1430 
1431 	if (hw->mac.type != e1000_pchlan)
1432 		return E1000_SUCCESS;
1433 
1434 	/* Set MDIO slow mode before any other MDIO access */
1435 	if (hw->phy.type == e1000_phy_82577) {
1436 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
1437 		if (ret_val)
1438 			return ret_val;
1439 	}
1440 
1441 	if (((hw->phy.type == e1000_phy_82577) &&
1442 	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
1443 	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
1444 		/* Disable generation of early preamble */
1445 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
1446 		if (ret_val)
1447 			return ret_val;
1448 
1449 		/* Preamble tuning for SSC */
1450 		ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
1451 						0xA204);
1452 		if (ret_val)
1453 			return ret_val;
1454 	}
1455 
1456 	if (hw->phy.type == e1000_phy_82578) {
1457 		/*
1458 		 * Return registers to default by doing a soft reset then
1459 		 * writing 0x3140 to the control register.
1460 		 */
1461 		if (hw->phy.revision < 2) {
1462 			e1000_phy_sw_reset_generic(hw);
1463 			ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
1464 							0x3140);
1465 		}
1466 	}
1467 
1468 	/* Select page 0 */
1469 	ret_val = hw->phy.ops.acquire(hw);
1470 	if (ret_val)
1471 		return ret_val;
1472 
1473 	hw->phy.addr = 1;
1474 	ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
1475 	hw->phy.ops.release(hw);
1476 	if (ret_val)
1477 		return ret_val;
1478 
1479 	/*
1480 	 * Configure the K1 Si workaround during phy reset assuming there is
1481 	 * link so that it disables K1 if link is in 1Gbps.
1482 	 */
1483 	ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
1484 	if (ret_val)
1485 		return ret_val;
1486 
1487 	/* Workaround for link disconnects on a busy hub in half duplex */
1488 	ret_val = hw->phy.ops.acquire(hw);
1489 	if (ret_val)
1490 		return ret_val;
1491 	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
1492 	if (ret_val)
1493 		goto release;
1494 	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
1495 					       phy_data & 0x00FF);
1496 release:
1497 	hw->phy.ops.release(hw);
1498 
1499 	return ret_val;
1500 }
1501 
1502 /**
1503  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
1504  *  @hw:   pointer to the HW structure
1505  **/
1506 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
1507 {
1508 	u32 mac_reg;
1509 	u16 i, phy_reg = 0;
1510 	s32 ret_val;
1511 
1512 	DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
1513 
1514 	ret_val = hw->phy.ops.acquire(hw);
1515 	if (ret_val)
1516 		return;
1517 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1518 	if (ret_val)
1519 		goto release;
1520 
1521 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
1522 	for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1523 		mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
1524 		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
1525 					   (u16)(mac_reg & 0xFFFF));
1526 		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
1527 					   (u16)((mac_reg >> 16) & 0xFFFF));
1528 
1529 		mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
1530 		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
1531 					   (u16)(mac_reg & 0xFFFF));
1532 		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
1533 					   (u16)((mac_reg & E1000_RAH_AV)
1534 						 >> 16));
1535 	}
1536 
1537 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1538 
1539 release:
1540 	hw->phy.ops.release(hw);
1541 }
1542 
1543 static u32 e1000_calc_rx_da_crc(u8 mac[])
1544 {
1545 	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
1546 	u32 i, j, mask, crc;
1547 
1548 	DEBUGFUNC("e1000_calc_rx_da_crc");
1549 
1550 	crc = 0xffffffff;
1551 	for (i = 0; i < 6; i++) {
1552 		crc = crc ^ mac[i];
1553 		for (j = 8; j > 0; j--) {
1554 			mask = (crc & 1) * (-1);
1555 			crc = (crc >> 1) ^ (poly & mask);
1556 		}
1557 	}
1558 	return ~crc;
1559 }
1560 
1561 /**
1562  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
1563  *  with 82579 PHY
1564  *  @hw: pointer to the HW structure
1565  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
1566  **/
1567 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1568 {
1569 	s32 ret_val = E1000_SUCCESS;
1570 	u16 phy_reg, data;
1571 	u32 mac_reg;
1572 	u16 i;
1573 
1574 	DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
1575 
1576 	if (hw->mac.type != e1000_pch2lan)
1577 		return E1000_SUCCESS;
1578 
1579 	/* disable Rx path while enabling/disabling workaround */
1580 	hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
1581 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
1582 					phy_reg | (1 << 14));
1583 	if (ret_val)
1584 		return ret_val;
1585 
1586 	if (enable) {
1587 		/*
1588 		 * Write Rx addresses (rar_entry_count for RAL/H, +4 for
1589 		 * SHRAL/H) and initial CRC values to the MAC
1590 		 */
1591 		for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1592 			u8 mac_addr[ETH_ADDR_LEN] = {0};
1593 			u32 addr_high, addr_low;
1594 
1595 			addr_high = E1000_READ_REG(hw, E1000_RAH(i));
1596 			if (!(addr_high & E1000_RAH_AV))
1597 				continue;
1598 			addr_low = E1000_READ_REG(hw, E1000_RAL(i));
1599 			mac_addr[0] = (addr_low & 0xFF);
1600 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
1601 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
1602 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
1603 			mac_addr[4] = (addr_high & 0xFF);
1604 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
1605 
1606 			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
1607 					e1000_calc_rx_da_crc(mac_addr));
1608 		}
1609 
1610 		/* Write Rx addresses to the PHY */
1611 		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
1612 
1613 		/* Enable jumbo frame workaround in the MAC */
1614 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1615 		mac_reg &= ~(1 << 14);
1616 		mac_reg |= (7 << 15);
1617 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
1618 
1619 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1620 		mac_reg |= E1000_RCTL_SECRC;
1621 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1622 
1623 		ret_val = e1000_read_kmrn_reg_generic(hw,
1624 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1625 						&data);
1626 		if (ret_val)
1627 			return ret_val;
1628 		ret_val = e1000_write_kmrn_reg_generic(hw,
1629 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1630 						data | (1 << 0));
1631 		if (ret_val)
1632 			return ret_val;
1633 		ret_val = e1000_read_kmrn_reg_generic(hw,
1634 						E1000_KMRNCTRLSTA_HD_CTRL,
1635 						&data);
1636 		if (ret_val)
1637 			return ret_val;
1638 		data &= ~(0xF << 8);
1639 		data |= (0xB << 8);
1640 		ret_val = e1000_write_kmrn_reg_generic(hw,
1641 						E1000_KMRNCTRLSTA_HD_CTRL,
1642 						data);
1643 		if (ret_val)
1644 			return ret_val;
1645 
1646 		/* Enable jumbo frame workaround in the PHY */
1647 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1648 		data &= ~(0x7F << 5);
1649 		data |= (0x37 << 5);
1650 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1651 		if (ret_val)
1652 			return ret_val;
1653 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1654 		data &= ~(1 << 13);
1655 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1656 		if (ret_val)
1657 			return ret_val;
1658 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1659 		data &= ~(0x3FF << 2);
1660 		data |= (0x1A << 2);
1661 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1662 		if (ret_val)
1663 			return ret_val;
1664 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
1665 		if (ret_val)
1666 			return ret_val;
1667 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1668 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
1669 						(1 << 10));
1670 		if (ret_val)
1671 			return ret_val;
1672 	} else {
1673 		/* Write MAC register values back to h/w defaults */
1674 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1675 		mac_reg &= ~(0xF << 14);
1676 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
1677 
1678 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1679 		mac_reg &= ~E1000_RCTL_SECRC;
1680 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1681 
1682 		ret_val = e1000_read_kmrn_reg_generic(hw,
1683 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1684 						&data);
1685 		if (ret_val)
1686 			return ret_val;
1687 		ret_val = e1000_write_kmrn_reg_generic(hw,
1688 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1689 						data & ~(1 << 0));
1690 		if (ret_val)
1691 			return ret_val;
1692 		ret_val = e1000_read_kmrn_reg_generic(hw,
1693 						E1000_KMRNCTRLSTA_HD_CTRL,
1694 						&data);
1695 		if (ret_val)
1696 			return ret_val;
1697 		data &= ~(0xF << 8);
1698 		data |= (0xB << 8);
1699 		ret_val = e1000_write_kmrn_reg_generic(hw,
1700 						E1000_KMRNCTRLSTA_HD_CTRL,
1701 						data);
1702 		if (ret_val)
1703 			return ret_val;
1704 
1705 		/* Write PHY register values back to h/w defaults */
1706 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1707 		data &= ~(0x7F << 5);
1708 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1709 		if (ret_val)
1710 			return ret_val;
1711 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1712 		data |= (1 << 13);
1713 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1714 		if (ret_val)
1715 			return ret_val;
1716 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1717 		data &= ~(0x3FF << 2);
1718 		data |= (0x8 << 2);
1719 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1720 		if (ret_val)
1721 			return ret_val;
1722 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
1723 		if (ret_val)
1724 			return ret_val;
1725 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1726 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
1727 						~(1 << 10));
1728 		if (ret_val)
1729 			return ret_val;
1730 	}
1731 
1732 	/* re-enable Rx path after enabling/disabling workaround */
1733 	return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
1734 				     ~(1 << 14));
1735 }
1736 
1737 /**
1738  *  e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1739  *  done after every PHY reset.
1740  **/
1741 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1742 {
1743 	s32 ret_val = E1000_SUCCESS;
1744 
1745 	DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
1746 
1747 	if (hw->mac.type != e1000_pch2lan)
1748 		return E1000_SUCCESS;
1749 
1750 	/* Set MDIO slow mode before any other MDIO access */
1751 	ret_val = e1000_set_mdio_slow_mode_hv(hw);
1752 
1753 	ret_val = hw->phy.ops.acquire(hw);
1754 	if (ret_val)
1755 		return ret_val;
1756 	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
1757 					       I82579_MSE_THRESHOLD);
1758 	if (ret_val)
1759 		goto release;
1760 	/* set MSE higher to enable link to stay up when noise is high */
1761 	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
1762 					       0x0034);
1763 	if (ret_val)
1764 		goto release;
1765 	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
1766 					       I82579_MSE_LINK_DOWN);
1767 	if (ret_val)
1768 		goto release;
1769 	/* drop link after 5 times MSE threshold was reached */
1770 	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
1771 					       0x0005);
1772 release:
1773 	hw->phy.ops.release(hw);
1774 
1775 	return ret_val;
1776 }
1777 
1778 /**
1779  *  e1000_k1_gig_workaround_lv - K1 Si workaround
1780  *  @hw:   pointer to the HW structure
1781  *
1782  *  Workaround to set the K1 beacon duration for 82579 parts
1783  **/
1784 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
1785 {
1786 	s32 ret_val = E1000_SUCCESS;
1787 	u16 status_reg = 0;
1788 	u32 mac_reg;
1789 	u16 phy_reg;
1790 
1791 	DEBUGFUNC("e1000_k1_workaround_lv");
1792 
1793 	if (hw->mac.type != e1000_pch2lan)
1794 		return E1000_SUCCESS;
1795 
1796 	/* Set K1 beacon duration based on 1Gbps speed or otherwise */
1797 	ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
1798 	if (ret_val)
1799 		return ret_val;
1800 
1801 	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
1802 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
1803 		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1804 		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1805 
1806 		ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
1807 		if (ret_val)
1808 			return ret_val;
1809 
1810 		if (status_reg & HV_M_STATUS_SPEED_1000) {
1811 			u16 pm_phy_reg;
1812 
1813 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1814 			phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
1815 			/* LV 1G Packet drop issue wa  */
1816 			ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
1817 						       &pm_phy_reg);
1818 			if (ret_val)
1819 				return ret_val;
1820 			pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA;
1821 			ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
1822 							pm_phy_reg);
1823 			if (ret_val)
1824 				return ret_val;
1825 		} else {
1826 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
1827 			phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
1828 		}
1829 		E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1830 		ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
1831 	}
1832 
1833 	return ret_val;
1834 }
1835 
1836 /**
1837  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
1838  *  @hw:   pointer to the HW structure
1839  *  @gate: boolean set to TRUE to gate, FALSE to ungate
1840  *
1841  *  Gate/ungate the automatic PHY configuration via hardware; perform
1842  *  the configuration via software instead.
1843  **/
1844 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
1845 {
1846 	u32 extcnf_ctrl;
1847 
1848 	DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
1849 
1850 	if (hw->mac.type != e1000_pch2lan)
1851 		return;
1852 
1853 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1854 
1855 	if (gate)
1856 		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1857 	else
1858 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1859 
1860 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1861 }
1862 
1863 /**
1864  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
1865  *  @hw: pointer to the HW structure
1866  *
1867  *  Check the appropriate indication the MAC has finished configuring the
1868  *  PHY after a software reset.
1869  **/
1870 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
1871 {
1872 	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
1873 
1874 	DEBUGFUNC("e1000_lan_init_done_ich8lan");
1875 
1876 	/* Wait for basic configuration completes before proceeding */
1877 	do {
1878 		data = E1000_READ_REG(hw, E1000_STATUS);
1879 		data &= E1000_STATUS_LAN_INIT_DONE;
1880 		usec_delay(100);
1881 	} while ((!data) && --loop);
1882 
1883 	/*
1884 	 * If basic configuration is incomplete before the above loop
1885 	 * count reaches 0, loading the configuration from NVM will
1886 	 * leave the PHY in a bad state possibly resulting in no link.
1887 	 */
1888 	if (loop == 0)
1889 		DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
1890 
1891 	/* Clear the Init Done bit for the next init event */
1892 	data = E1000_READ_REG(hw, E1000_STATUS);
1893 	data &= ~E1000_STATUS_LAN_INIT_DONE;
1894 	E1000_WRITE_REG(hw, E1000_STATUS, data);
1895 }
1896 
1897 /**
1898  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
1899  *  @hw: pointer to the HW structure
1900  **/
1901 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1902 {
1903 	s32 ret_val = E1000_SUCCESS;
1904 	u16 reg;
1905 
1906 	DEBUGFUNC("e1000_post_phy_reset_ich8lan");
1907 
1908 	if (hw->phy.ops.check_reset_block(hw))
1909 		return E1000_SUCCESS;
1910 
1911 	/* Allow time for h/w to get to quiescent state after reset */
1912 	msec_delay(10);
1913 
1914 	/* Perform any necessary post-reset workarounds */
1915 	switch (hw->mac.type) {
1916 	case e1000_pchlan:
1917 		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
1918 		if (ret_val)
1919 			return ret_val;
1920 		break;
1921 	case e1000_pch2lan:
1922 		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
1923 		if (ret_val)
1924 			return ret_val;
1925 		break;
1926 	default:
1927 		break;
1928 	}
1929 
1930 	/* Clear the host wakeup bit after lcd reset */
1931 	if (hw->mac.type >= e1000_pchlan) {
1932 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
1933 		reg &= ~BM_WUC_HOST_WU_BIT;
1934 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
1935 	}
1936 
1937 	/* Configure the LCD with the extended configuration region in NVM */
1938 	ret_val = e1000_sw_lcd_config_ich8lan(hw);
1939 	if (ret_val)
1940 		return ret_val;
1941 
1942 	/* Configure the LCD with the OEM bits in NVM */
1943 	ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
1944 
1945 	if (hw->mac.type == e1000_pch2lan) {
1946 		/* Ungate automatic PHY configuration on non-managed 82579 */
1947 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
1948 		    E1000_ICH_FWSM_FW_VALID)) {
1949 			msec_delay(10);
1950 			e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
1951 		}
1952 
1953 		/* Set EEE LPI Update Timer to 200usec */
1954 		ret_val = hw->phy.ops.acquire(hw);
1955 		if (ret_val)
1956 			return ret_val;
1957 		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
1958 						       I82579_LPI_UPDATE_TIMER);
1959 		if (!ret_val)
1960 			ret_val = hw->phy.ops.write_reg_locked(hw,
1961 							       I82579_EMI_DATA,
1962 							       0x1387);
1963 		hw->phy.ops.release(hw);
1964 	}
1965 
1966 	return ret_val;
1967 }
1968 
1969 /**
1970  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
1971  *  @hw: pointer to the HW structure
1972  *
1973  *  Resets the PHY
1974  *  This is a function pointer entry point called by drivers
1975  *  or other shared routines.
1976  **/
1977 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
1978 {
1979 	s32 ret_val = E1000_SUCCESS;
1980 
1981 	DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
1982 
1983 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
1984 	if ((hw->mac.type == e1000_pch2lan) &&
1985 	    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
1986 		e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
1987 
1988 	ret_val = e1000_phy_hw_reset_generic(hw);
1989 	if (ret_val)
1990 		return ret_val;
1991 
1992 	return e1000_post_phy_reset_ich8lan(hw);
1993 }
1994 
1995 /**
1996  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
1997  *  @hw: pointer to the HW structure
1998  *  @active: TRUE to enable LPLU, FALSE to disable
1999  *
2000  *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
2001  *  bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
2002  *  the phy speed. This function will manually set the LPLU bit and restart
2003  *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
2004  *  since it configures the same bit.
2005  **/
2006 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2007 {
2008 	s32 ret_val = E1000_SUCCESS;
2009 	u16 oem_reg;
2010 
2011 	DEBUGFUNC("e1000_set_lplu_state_pchlan");
2012 
2013 	ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
2014 	if (ret_val)
2015 		return ret_val;
2016 
2017 	if (active)
2018 		oem_reg |= HV_OEM_BITS_LPLU;
2019 	else
2020 		oem_reg &= ~HV_OEM_BITS_LPLU;
2021 
2022 	if (!hw->phy.ops.check_reset_block(hw))
2023 		oem_reg |= HV_OEM_BITS_RESTART_AN;
2024 
2025 	return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
2026 }
2027 
2028 /**
2029  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2030  *  @hw: pointer to the HW structure
2031  *  @active: TRUE to enable LPLU, FALSE to disable
2032  *
2033  *  Sets the LPLU D0 state according to the active flag.  When
2034  *  activating LPLU this function also disables smart speed
2035  *  and vice versa.  LPLU will not be activated unless the
2036  *  device autonegotiation advertisement meets standards of
2037  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
2038  *  This is a function pointer entry point only called by
2039  *  PHY setup routines.
2040  **/
2041 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2042 {
2043 	struct e1000_phy_info *phy = &hw->phy;
2044 	u32 phy_ctrl;
2045 	s32 ret_val = E1000_SUCCESS;
2046 	u16 data;
2047 
2048 	DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
2049 
2050 	if (phy->type == e1000_phy_ife)
2051 		return E1000_SUCCESS;
2052 
2053 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2054 
2055 	if (active) {
2056 		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2057 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2058 
2059 		if (phy->type != e1000_phy_igp_3)
2060 			return E1000_SUCCESS;
2061 
2062 		/*
2063 		 * Call gig speed drop workaround on LPLU before accessing
2064 		 * any PHY registers
2065 		 */
2066 		if (hw->mac.type == e1000_ich8lan)
2067 			e1000_gig_downshift_workaround_ich8lan(hw);
2068 
2069 		/* When LPLU is enabled, we should disable SmartSpeed */
2070 		ret_val = phy->ops.read_reg(hw,
2071 					    IGP01E1000_PHY_PORT_CONFIG,
2072 					    &data);
2073 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2074 		ret_val = phy->ops.write_reg(hw,
2075 					     IGP01E1000_PHY_PORT_CONFIG,
2076 					     data);
2077 		if (ret_val)
2078 			return ret_val;
2079 	} else {
2080 		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2081 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2082 
2083 		if (phy->type != e1000_phy_igp_3)
2084 			return E1000_SUCCESS;
2085 
2086 		/*
2087 		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
2088 		 * during Dx states where the power conservation is most
2089 		 * important.  During driver activity we should enable
2090 		 * SmartSpeed, so performance is maintained.
2091 		 */
2092 		if (phy->smart_speed == e1000_smart_speed_on) {
2093 			ret_val = phy->ops.read_reg(hw,
2094 						    IGP01E1000_PHY_PORT_CONFIG,
2095 						    &data);
2096 			if (ret_val)
2097 				return ret_val;
2098 
2099 			data |= IGP01E1000_PSCFR_SMART_SPEED;
2100 			ret_val = phy->ops.write_reg(hw,
2101 						     IGP01E1000_PHY_PORT_CONFIG,
2102 						     data);
2103 			if (ret_val)
2104 				return ret_val;
2105 		} else if (phy->smart_speed == e1000_smart_speed_off) {
2106 			ret_val = phy->ops.read_reg(hw,
2107 						    IGP01E1000_PHY_PORT_CONFIG,
2108 						    &data);
2109 			if (ret_val)
2110 				return ret_val;
2111 
2112 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2113 			ret_val = phy->ops.write_reg(hw,
2114 						     IGP01E1000_PHY_PORT_CONFIG,
2115 						     data);
2116 			if (ret_val)
2117 				return ret_val;
2118 		}
2119 	}
2120 
2121 	return E1000_SUCCESS;
2122 }
2123 
2124 /**
2125  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
2126  *  @hw: pointer to the HW structure
2127  *  @active: TRUE to enable LPLU, FALSE to disable
2128  *
2129  *  Sets the LPLU D3 state according to the active flag.  When
2130  *  activating LPLU this function also disables smart speed
2131  *  and vice versa.  LPLU will not be activated unless the
2132  *  device autonegotiation advertisement meets standards of
2133  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
2134  *  This is a function pointer entry point only called by
2135  *  PHY setup routines.
2136  **/
2137 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2138 {
2139 	struct e1000_phy_info *phy = &hw->phy;
2140 	u32 phy_ctrl;
2141 	s32 ret_val = E1000_SUCCESS;
2142 	u16 data;
2143 
2144 	DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
2145 
2146 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2147 
2148 	if (!active) {
2149 		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
2150 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2151 
2152 		if (phy->type != e1000_phy_igp_3)
2153 			return E1000_SUCCESS;
2154 
2155 		/*
2156 		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
2157 		 * during Dx states where the power conservation is most
2158 		 * important.  During driver activity we should enable
2159 		 * SmartSpeed, so performance is maintained.
2160 		 */
2161 		if (phy->smart_speed == e1000_smart_speed_on) {
2162 			ret_val = phy->ops.read_reg(hw,
2163 						    IGP01E1000_PHY_PORT_CONFIG,
2164 						    &data);
2165 			if (ret_val)
2166 				return ret_val;
2167 
2168 			data |= IGP01E1000_PSCFR_SMART_SPEED;
2169 			ret_val = phy->ops.write_reg(hw,
2170 						     IGP01E1000_PHY_PORT_CONFIG,
2171 						     data);
2172 			if (ret_val)
2173 				return ret_val;
2174 		} else if (phy->smart_speed == e1000_smart_speed_off) {
2175 			ret_val = phy->ops.read_reg(hw,
2176 						    IGP01E1000_PHY_PORT_CONFIG,
2177 						    &data);
2178 			if (ret_val)
2179 				return ret_val;
2180 
2181 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2182 			ret_val = phy->ops.write_reg(hw,
2183 						     IGP01E1000_PHY_PORT_CONFIG,
2184 						     data);
2185 			if (ret_val)
2186 				return ret_val;
2187 		}
2188 	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
2189 		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
2190 		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
2191 		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
2192 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2193 
2194 		if (phy->type != e1000_phy_igp_3)
2195 			return E1000_SUCCESS;
2196 
2197 		/*
2198 		 * Call gig speed drop workaround on LPLU before accessing
2199 		 * any PHY registers
2200 		 */
2201 		if (hw->mac.type == e1000_ich8lan)
2202 			e1000_gig_downshift_workaround_ich8lan(hw);
2203 
2204 		/* When LPLU is enabled, we should disable SmartSpeed */
2205 		ret_val = phy->ops.read_reg(hw,
2206 					    IGP01E1000_PHY_PORT_CONFIG,
2207 					    &data);
2208 		if (ret_val)
2209 			return ret_val;
2210 
2211 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2212 		ret_val = phy->ops.write_reg(hw,
2213 					     IGP01E1000_PHY_PORT_CONFIG,
2214 					     data);
2215 	}
2216 
2217 	return ret_val;
2218 }
2219 
2220 /**
2221  *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
2222  *  @hw: pointer to the HW structure
2223  *  @bank:  pointer to the variable that returns the active bank
2224  *
2225  *  Reads signature byte from the NVM using the flash access registers.
2226  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
2227  **/
2228 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
2229 {
2230 	u32 eecd;
2231 	struct e1000_nvm_info *nvm = &hw->nvm;
2232 	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
2233 	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
2234 	u8 sig_byte = 0;
2235 	s32 ret_val;
2236 
2237 	DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
2238 
2239 	switch (hw->mac.type) {
2240 	case e1000_ich8lan:
2241 	case e1000_ich9lan:
2242 		eecd = E1000_READ_REG(hw, E1000_EECD);
2243 		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
2244 		    E1000_EECD_SEC1VAL_VALID_MASK) {
2245 			if (eecd & E1000_EECD_SEC1VAL)
2246 				*bank = 1;
2247 			else
2248 				*bank = 0;
2249 
2250 			return E1000_SUCCESS;
2251 		}
2252 		DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
2253 		/* fall-thru */
2254 	default:
2255 		/* set bank to 0 in case flash read fails */
2256 		*bank = 0;
2257 
2258 		/* Check bank 0 */
2259 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
2260 							&sig_byte);
2261 		if (ret_val)
2262 			return ret_val;
2263 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2264 		    E1000_ICH_NVM_SIG_VALUE) {
2265 			*bank = 0;
2266 			return E1000_SUCCESS;
2267 		}
2268 
2269 		/* Check bank 1 */
2270 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
2271 							bank1_offset,
2272 							&sig_byte);
2273 		if (ret_val)
2274 			return ret_val;
2275 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2276 		    E1000_ICH_NVM_SIG_VALUE) {
2277 			*bank = 1;
2278 			return E1000_SUCCESS;
2279 		}
2280 
2281 		DEBUGOUT("ERROR: No valid NVM bank present\n");
2282 		return -E1000_ERR_NVM;
2283 	}
2284 }
2285 
2286 /**
2287  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
2288  *  @hw: pointer to the HW structure
2289  *  @offset: The offset (in bytes) of the word(s) to read.
2290  *  @words: Size of data to read in words
2291  *  @data: Pointer to the word(s) to read at offset.
2292  *
2293  *  Reads a word(s) from the NVM using the flash access registers.
2294  **/
2295 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2296 				  u16 *data)
2297 {
2298 	struct e1000_nvm_info *nvm = &hw->nvm;
2299 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2300 	u32 act_offset;
2301 	s32 ret_val = E1000_SUCCESS;
2302 	u32 bank = 0;
2303 	u16 i, word;
2304 
2305 	DEBUGFUNC("e1000_read_nvm_ich8lan");
2306 
2307 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2308 	    (words == 0)) {
2309 		DEBUGOUT("nvm parameter(s) out of bounds\n");
2310 		ret_val = -E1000_ERR_NVM;
2311 		goto out;
2312 	}
2313 
2314 	nvm->ops.acquire(hw);
2315 
2316 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2317 	if (ret_val != E1000_SUCCESS) {
2318 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
2319 		bank = 0;
2320 	}
2321 
2322 	act_offset = (bank) ? nvm->flash_bank_size : 0;
2323 	act_offset += offset;
2324 
2325 	ret_val = E1000_SUCCESS;
2326 	for (i = 0; i < words; i++) {
2327 		if (dev_spec->shadow_ram[offset+i].modified) {
2328 			data[i] = dev_spec->shadow_ram[offset+i].value;
2329 		} else {
2330 			ret_val = e1000_read_flash_word_ich8lan(hw,
2331 								act_offset + i,
2332 								&word);
2333 			if (ret_val)
2334 				break;
2335 			data[i] = word;
2336 		}
2337 	}
2338 
2339 	nvm->ops.release(hw);
2340 
2341 out:
2342 	if (ret_val)
2343 		DEBUGOUT1("NVM read error: %d\n", ret_val);
2344 
2345 	return ret_val;
2346 }
2347 
2348 /**
2349  *  e1000_flash_cycle_init_ich8lan - Initialize flash
2350  *  @hw: pointer to the HW structure
2351  *
2352  *  This function does initial flash setup so that a new read/write/erase cycle
2353  *  can be started.
2354  **/
2355 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2356 {
2357 	union ich8_hws_flash_status hsfsts;
2358 	s32 ret_val = -E1000_ERR_NVM;
2359 
2360 	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
2361 
2362 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2363 
2364 	/* Check if the flash descriptor is valid */
2365 	if (hsfsts.hsf_status.fldesvalid == 0) {
2366 		DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
2367 		return -E1000_ERR_NVM;
2368 	}
2369 
2370 	/* Clear FCERR and DAEL in hw status by writing 1 */
2371 	hsfsts.hsf_status.flcerr = 1;
2372 	hsfsts.hsf_status.dael = 1;
2373 
2374 	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2375 
2376 	/*
2377 	 * Either we should have a hardware SPI cycle in progress
2378 	 * bit to check against, in order to start a new cycle or
2379 	 * FDONE bit should be changed in the hardware so that it
2380 	 * is 1 after hardware reset, which can then be used as an
2381 	 * indication whether a cycle is in progress or has been
2382 	 * completed.
2383 	 */
2384 
2385 	if (hsfsts.hsf_status.flcinprog == 0) {
2386 		/*
2387 		 * There is no cycle running at present,
2388 		 * so we can start a cycle.
2389 		 * Begin by setting Flash Cycle Done.
2390 		 */
2391 		hsfsts.hsf_status.flcdone = 1;
2392 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2393 		ret_val = E1000_SUCCESS;
2394 	} else {
2395 		s32 i;
2396 
2397 		/*
2398 		 * Otherwise poll for sometime so the current
2399 		 * cycle has a chance to end before giving up.
2400 		 */
2401 		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
2402 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2403 							      ICH_FLASH_HSFSTS);
2404 			if (hsfsts.hsf_status.flcinprog == 0) {
2405 				ret_val = E1000_SUCCESS;
2406 				break;
2407 			}
2408 			usec_delay(1);
2409 		}
2410 		if (ret_val == E1000_SUCCESS) {
2411 			/*
2412 			 * Successful in waiting for previous cycle to timeout,
2413 			 * now set the Flash Cycle Done.
2414 			 */
2415 			hsfsts.hsf_status.flcdone = 1;
2416 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
2417 						hsfsts.regval);
2418 		} else {
2419 			DEBUGOUT("Flash controller busy, cannot get access\n");
2420 		}
2421 	}
2422 
2423 	return ret_val;
2424 }
2425 
2426 /**
2427  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
2428  *  @hw: pointer to the HW structure
2429  *  @timeout: maximum time to wait for completion
2430  *
2431  *  This function starts a flash cycle and waits for its completion.
2432  **/
2433 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
2434 {
2435 	union ich8_hws_flash_ctrl hsflctl;
2436 	union ich8_hws_flash_status hsfsts;
2437 	u32 i = 0;
2438 
2439 	DEBUGFUNC("e1000_flash_cycle_ich8lan");
2440 
2441 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
2442 	hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2443 	hsflctl.hsf_ctrl.flcgo = 1;
2444 	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2445 
2446 	/* wait till FDONE bit is set to 1 */
2447 	do {
2448 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2449 		if (hsfsts.hsf_status.flcdone == 1)
2450 			break;
2451 		usec_delay(1);
2452 	} while (i++ < timeout);
2453 
2454 	if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
2455 		return E1000_SUCCESS;
2456 
2457 	return -E1000_ERR_NVM;
2458 }
2459 
2460 /**
2461  *  e1000_read_flash_word_ich8lan - Read word from flash
2462  *  @hw: pointer to the HW structure
2463  *  @offset: offset to data location
2464  *  @data: pointer to the location for storing the data
2465  *
2466  *  Reads the flash word at offset into data.  Offset is converted
2467  *  to bytes before read.
2468  **/
2469 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
2470 					 u16 *data)
2471 {
2472 	DEBUGFUNC("e1000_read_flash_word_ich8lan");
2473 
2474 	if (!data)
2475 		return -E1000_ERR_NVM;
2476 
2477 	/* Must convert offset into bytes. */
2478 	offset <<= 1;
2479 
2480 	return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
2481 }
2482 
2483 /**
2484  *  e1000_read_flash_byte_ich8lan - Read byte from flash
2485  *  @hw: pointer to the HW structure
2486  *  @offset: The offset of the byte to read.
2487  *  @data: Pointer to a byte to store the value read.
2488  *
2489  *  Reads a single byte from the NVM using the flash access registers.
2490  **/
2491 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2492 					 u8 *data)
2493 {
2494 	s32 ret_val;
2495 	u16 word = 0;
2496 
2497 	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
2498 	if (ret_val)
2499 		return ret_val;
2500 
2501 	*data = (u8)word;
2502 
2503 	return E1000_SUCCESS;
2504 }
2505 
2506 /**
2507  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
2508  *  @hw: pointer to the HW structure
2509  *  @offset: The offset (in bytes) of the byte or word to read.
2510  *  @size: Size of data to read, 1=byte 2=word
2511  *  @data: Pointer to the word to store the value read.
2512  *
2513  *  Reads a byte or word from the NVM using the flash access registers.
2514  **/
2515 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2516 					 u8 size, u16 *data)
2517 {
2518 	union ich8_hws_flash_status hsfsts;
2519 	union ich8_hws_flash_ctrl hsflctl;
2520 	u32 flash_linear_addr;
2521 	u32 flash_data = 0;
2522 	s32 ret_val = -E1000_ERR_NVM;
2523 	u8 count = 0;
2524 
2525 	DEBUGFUNC("e1000_read_flash_data_ich8lan");
2526 
2527 	if (size < 1  || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
2528 		return -E1000_ERR_NVM;
2529 
2530 	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2531 			    hw->nvm.flash_base_addr;
2532 
2533 	do {
2534 		usec_delay(1);
2535 		/* Steps */
2536 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
2537 		if (ret_val != E1000_SUCCESS)
2538 			break;
2539 
2540 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2541 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2542 		hsflctl.hsf_ctrl.fldbcount = size - 1;
2543 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
2544 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2545 
2546 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
2547 
2548 		ret_val = e1000_flash_cycle_ich8lan(hw,
2549 						ICH_FLASH_READ_COMMAND_TIMEOUT);
2550 
2551 		/*
2552 		 * Check if FCERR is set to 1, if set to 1, clear it
2553 		 * and try the whole sequence a few more times, else
2554 		 * read in (shift in) the Flash Data0, the order is
2555 		 * least significant byte first msb to lsb
2556 		 */
2557 		if (ret_val == E1000_SUCCESS) {
2558 			flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
2559 			if (size == 1)
2560 				*data = (u8)(flash_data & 0x000000FF);
2561 			else if (size == 2)
2562 				*data = (u16)(flash_data & 0x0000FFFF);
2563 			break;
2564 		} else {
2565 			/*
2566 			 * If we've gotten here, then things are probably
2567 			 * completely hosed, but if the error condition is
2568 			 * detected, it won't hurt to give it another try...
2569 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
2570 			 */
2571 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2572 							      ICH_FLASH_HSFSTS);
2573 			if (hsfsts.hsf_status.flcerr == 1) {
2574 				/* Repeat for some time before giving up. */
2575 				continue;
2576 			} else if (hsfsts.hsf_status.flcdone == 0) {
2577 				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
2578 				break;
2579 			}
2580 		}
2581 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2582 
2583 	return ret_val;
2584 }
2585 
2586 /**
2587  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
2588  *  @hw: pointer to the HW structure
2589  *  @offset: The offset (in bytes) of the word(s) to write.
2590  *  @words: Size of data to write in words
2591  *  @data: Pointer to the word(s) to write at offset.
2592  *
2593  *  Writes a byte or word to the NVM using the flash access registers.
2594  **/
2595 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2596 				   u16 *data)
2597 {
2598 	struct e1000_nvm_info *nvm = &hw->nvm;
2599 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2600 	u16 i;
2601 
2602 	DEBUGFUNC("e1000_write_nvm_ich8lan");
2603 
2604 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2605 	    (words == 0)) {
2606 		DEBUGOUT("nvm parameter(s) out of bounds\n");
2607 		return -E1000_ERR_NVM;
2608 	}
2609 
2610 	nvm->ops.acquire(hw);
2611 
2612 	for (i = 0; i < words; i++) {
2613 		dev_spec->shadow_ram[offset+i].modified = TRUE;
2614 		dev_spec->shadow_ram[offset+i].value = data[i];
2615 	}
2616 
2617 	nvm->ops.release(hw);
2618 
2619 	return E1000_SUCCESS;
2620 }
2621 
2622 /**
2623  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
2624  *  @hw: pointer to the HW structure
2625  *
2626  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
2627  *  which writes the checksum to the shadow ram.  The changes in the shadow
2628  *  ram are then committed to the EEPROM by processing each bank at a time
2629  *  checking for the modified bit and writing only the pending changes.
2630  *  After a successful commit, the shadow ram is cleared and is ready for
2631  *  future writes.
2632  **/
2633 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2634 {
2635 	struct e1000_nvm_info *nvm = &hw->nvm;
2636 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2637 	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
2638 	s32 ret_val;
2639 	u16 data;
2640 
2641 	DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
2642 
2643 	ret_val = e1000_update_nvm_checksum_generic(hw);
2644 	if (ret_val)
2645 		goto out;
2646 
2647 	if (nvm->type != e1000_nvm_flash_sw)
2648 		goto out;
2649 
2650 	nvm->ops.acquire(hw);
2651 
2652 	/*
2653 	 * We're writing to the opposite bank so if we're on bank 1,
2654 	 * write to bank 0 etc.  We also need to erase the segment that
2655 	 * is going to be written
2656 	 */
2657 	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2658 	if (ret_val != E1000_SUCCESS) {
2659 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
2660 		bank = 0;
2661 	}
2662 
2663 	if (bank == 0) {
2664 		new_bank_offset = nvm->flash_bank_size;
2665 		old_bank_offset = 0;
2666 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
2667 		if (ret_val)
2668 			goto release;
2669 	} else {
2670 		old_bank_offset = nvm->flash_bank_size;
2671 		new_bank_offset = 0;
2672 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
2673 		if (ret_val)
2674 			goto release;
2675 	}
2676 
2677 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2678 		/*
2679 		 * Determine whether to write the value stored
2680 		 * in the other NVM bank or a modified value stored
2681 		 * in the shadow RAM
2682 		 */
2683 		if (dev_spec->shadow_ram[i].modified) {
2684 			data = dev_spec->shadow_ram[i].value;
2685 		} else {
2686 			ret_val = e1000_read_flash_word_ich8lan(hw, i +
2687 								old_bank_offset,
2688 								&data);
2689 			if (ret_val)
2690 				break;
2691 		}
2692 
2693 		/*
2694 		 * If the word is 0x13, then make sure the signature bits
2695 		 * (15:14) are 11b until the commit has completed.
2696 		 * This will allow us to write 10b which indicates the
2697 		 * signature is valid.  We want to do this after the write
2698 		 * has completed so that we don't mark the segment valid
2699 		 * while the write is still in progress
2700 		 */
2701 		if (i == E1000_ICH_NVM_SIG_WORD)
2702 			data |= E1000_ICH_NVM_SIG_MASK;
2703 
2704 		/* Convert offset to bytes. */
2705 		act_offset = (i + new_bank_offset) << 1;
2706 
2707 		usec_delay(100);
2708 		/* Write the bytes to the new bank. */
2709 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2710 							       act_offset,
2711 							       (u8)data);
2712 		if (ret_val)
2713 			break;
2714 
2715 		usec_delay(100);
2716 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2717 							  act_offset + 1,
2718 							  (u8)(data >> 8));
2719 		if (ret_val)
2720 			break;
2721 	}
2722 
2723 	/*
2724 	 * Don't bother writing the segment valid bits if sector
2725 	 * programming failed.
2726 	 */
2727 	if (ret_val) {
2728 		DEBUGOUT("Flash commit failed.\n");
2729 		goto release;
2730 	}
2731 
2732 	/*
2733 	 * Finally validate the new segment by setting bit 15:14
2734 	 * to 10b in word 0x13 , this can be done without an
2735 	 * erase as well since these bits are 11 to start with
2736 	 * and we need to change bit 14 to 0b
2737 	 */
2738 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
2739 	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
2740 	if (ret_val)
2741 		goto release;
2742 
2743 	data &= 0xBFFF;
2744 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2745 						       act_offset * 2 + 1,
2746 						       (u8)(data >> 8));
2747 	if (ret_val)
2748 		goto release;
2749 
2750 	/*
2751 	 * And invalidate the previously valid segment by setting
2752 	 * its signature word (0x13) high_byte to 0b. This can be
2753 	 * done without an erase because flash erase sets all bits
2754 	 * to 1's. We can write 1's to 0's without an erase
2755 	 */
2756 	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
2757 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
2758 	if (ret_val)
2759 		goto release;
2760 
2761 	/* Great!  Everything worked, we can now clear the cached entries. */
2762 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2763 		dev_spec->shadow_ram[i].modified = FALSE;
2764 		dev_spec->shadow_ram[i].value = 0xFFFF;
2765 	}
2766 
2767 release:
2768 	nvm->ops.release(hw);
2769 
2770 	/*
2771 	 * Reload the EEPROM, or else modifications will not appear
2772 	 * until after the next adapter reset.
2773 	 */
2774 	if (!ret_val) {
2775 		nvm->ops.reload(hw);
2776 		msec_delay(10);
2777 	}
2778 
2779 out:
2780 	if (ret_val)
2781 		DEBUGOUT1("NVM update error: %d\n", ret_val);
2782 
2783 	return ret_val;
2784 }
2785 
2786 /**
2787  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
2788  *  @hw: pointer to the HW structure
2789  *
2790  *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
2791  *  If the bit is 0, that the EEPROM had been modified, but the checksum was not
2792  *  calculated, in which case we need to calculate the checksum and set bit 6.
2793  **/
2794 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
2795 {
2796 	s32 ret_val;
2797 	u16 data;
2798 
2799 	DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
2800 
2801 	/*
2802 	 * Read 0x19 and check bit 6.  If this bit is 0, the checksum
2803 	 * needs to be fixed.  This bit is an indication that the NVM
2804 	 * was prepared by OEM software and did not calculate the
2805 	 * checksum...a likely scenario.
2806 	 */
2807 	ret_val = hw->nvm.ops.read(hw, 0x19, 1, &data);
2808 	if (ret_val)
2809 		return ret_val;
2810 
2811 	if ((data & 0x40) == 0) {
2812 		data |= 0x40;
2813 		ret_val = hw->nvm.ops.write(hw, 0x19, 1, &data);
2814 		if (ret_val)
2815 			return ret_val;
2816 		ret_val = hw->nvm.ops.update(hw);
2817 		if (ret_val)
2818 			return ret_val;
2819 	}
2820 
2821 	return e1000_validate_nvm_checksum_generic(hw);
2822 }
2823 
2824 /**
2825  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
2826  *  @hw: pointer to the HW structure
2827  *  @offset: The offset (in bytes) of the byte/word to read.
2828  *  @size: Size of data to read, 1=byte 2=word
2829  *  @data: The byte(s) to write to the NVM.
2830  *
2831  *  Writes one/two bytes to the NVM using the flash access registers.
2832  **/
2833 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2834 					  u8 size, u16 data)
2835 {
2836 	union ich8_hws_flash_status hsfsts;
2837 	union ich8_hws_flash_ctrl hsflctl;
2838 	u32 flash_linear_addr;
2839 	u32 flash_data = 0;
2840 	s32 ret_val;
2841 	u8 count = 0;
2842 
2843 	DEBUGFUNC("e1000_write_ich8_data");
2844 
2845 	if (size < 1 || size > 2 || data > size * 0xff ||
2846 	    offset > ICH_FLASH_LINEAR_ADDR_MASK)
2847 		return -E1000_ERR_NVM;
2848 
2849 	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2850 			    hw->nvm.flash_base_addr;
2851 
2852 	do {
2853 		usec_delay(1);
2854 		/* Steps */
2855 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
2856 		if (ret_val != E1000_SUCCESS)
2857 			break;
2858 
2859 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2860 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2861 		hsflctl.hsf_ctrl.fldbcount = size - 1;
2862 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
2863 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2864 
2865 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
2866 
2867 		if (size == 1)
2868 			flash_data = (u32)data & 0x00FF;
2869 		else
2870 			flash_data = (u32)data;
2871 
2872 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
2873 
2874 		/*
2875 		 * check if FCERR is set to 1 , if set to 1, clear it
2876 		 * and try the whole sequence a few more times else done
2877 		 */
2878 		ret_val = e1000_flash_cycle_ich8lan(hw,
2879 					       ICH_FLASH_WRITE_COMMAND_TIMEOUT);
2880 		if (ret_val == E1000_SUCCESS)
2881 			break;
2882 
2883 		/*
2884 		 * If we're here, then things are most likely
2885 		 * completely hosed, but if the error condition
2886 		 * is detected, it won't hurt to give it another
2887 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
2888 		 */
2889 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2890 		if (hsfsts.hsf_status.flcerr == 1)
2891 			/* Repeat for some time before giving up. */
2892 			continue;
2893 		if (hsfsts.hsf_status.flcdone == 0) {
2894 			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
2895 			break;
2896 		}
2897 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2898 
2899 	return ret_val;
2900 }
2901 
2902 /**
2903  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
2904  *  @hw: pointer to the HW structure
2905  *  @offset: The index of the byte to read.
2906  *  @data: The byte to write to the NVM.
2907  *
2908  *  Writes a single byte to the NVM using the flash access registers.
2909  **/
2910 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2911 					  u8 data)
2912 {
2913 	u16 word = (u16)data;
2914 
2915 	DEBUGFUNC("e1000_write_flash_byte_ich8lan");
2916 
2917 	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
2918 }
2919 
2920 /**
2921  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
2922  *  @hw: pointer to the HW structure
2923  *  @offset: The offset of the byte to write.
2924  *  @byte: The byte to write to the NVM.
2925  *
2926  *  Writes a single byte to the NVM using the flash access registers.
2927  *  Goes through a retry algorithm before giving up.
2928  **/
2929 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
2930 						u32 offset, u8 byte)
2931 {
2932 	s32 ret_val;
2933 	u16 program_retries;
2934 
2935 	DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
2936 
2937 	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2938 	if (!ret_val)
2939 		return ret_val;
2940 
2941 	for (program_retries = 0; program_retries < 100; program_retries++) {
2942 		DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
2943 		usec_delay(100);
2944 		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2945 		if (ret_val == E1000_SUCCESS)
2946 			break;
2947 	}
2948 	if (program_retries == 100)
2949 		return -E1000_ERR_NVM;
2950 
2951 	return E1000_SUCCESS;
2952 }
2953 
2954 /**
2955  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
2956  *  @hw: pointer to the HW structure
2957  *  @bank: 0 for first bank, 1 for second bank, etc.
2958  *
2959  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
2960  *  bank N is 4096 * N + flash_reg_addr.
2961  **/
2962 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
2963 {
2964 	struct e1000_nvm_info *nvm = &hw->nvm;
2965 	union ich8_hws_flash_status hsfsts;
2966 	union ich8_hws_flash_ctrl hsflctl;
2967 	u32 flash_linear_addr;
2968 	/* bank size is in 16bit words - adjust to bytes */
2969 	u32 flash_bank_size = nvm->flash_bank_size * 2;
2970 	s32 ret_val;
2971 	s32 count = 0;
2972 	s32 j, iteration, sector_size;
2973 
2974 	DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
2975 
2976 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2977 
2978 	/*
2979 	 * Determine HW Sector size: Read BERASE bits of hw flash status
2980 	 * register
2981 	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
2982 	 *     consecutive sectors.  The start index for the nth Hw sector
2983 	 *     can be calculated as = bank * 4096 + n * 256
2984 	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
2985 	 *     The start index for the nth Hw sector can be calculated
2986 	 *     as = bank * 4096
2987 	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
2988 	 *     (ich9 only, otherwise error condition)
2989 	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
2990 	 */
2991 	switch (hsfsts.hsf_status.berasesz) {
2992 	case 0:
2993 		/* Hw sector size 256 */
2994 		sector_size = ICH_FLASH_SEG_SIZE_256;
2995 		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
2996 		break;
2997 	case 1:
2998 		sector_size = ICH_FLASH_SEG_SIZE_4K;
2999 		iteration = 1;
3000 		break;
3001 	case 2:
3002 		sector_size = ICH_FLASH_SEG_SIZE_8K;
3003 		iteration = 1;
3004 		break;
3005 	case 3:
3006 		sector_size = ICH_FLASH_SEG_SIZE_64K;
3007 		iteration = 1;
3008 		break;
3009 	default:
3010 		return -E1000_ERR_NVM;
3011 	}
3012 
3013 	/* Start with the base address, then add the sector offset. */
3014 	flash_linear_addr = hw->nvm.flash_base_addr;
3015 	flash_linear_addr += (bank) ? flash_bank_size : 0;
3016 
3017 	for (j = 0; j < iteration ; j++) {
3018 		do {
3019 			/* Steps */
3020 			ret_val = e1000_flash_cycle_init_ich8lan(hw);
3021 			if (ret_val)
3022 				return ret_val;
3023 
3024 			/*
3025 			 * Write a value 11 (block Erase) in Flash
3026 			 * Cycle field in hw flash control
3027 			 */
3028 			hsflctl.regval = E1000_READ_FLASH_REG16(hw,
3029 							      ICH_FLASH_HSFCTL);
3030 			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3031 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3032 						hsflctl.regval);
3033 
3034 			/*
3035 			 * Write the last 24 bits of an index within the
3036 			 * block into Flash Linear address field in Flash
3037 			 * Address.
3038 			 */
3039 			flash_linear_addr += (j * sector_size);
3040 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3041 					      flash_linear_addr);
3042 
3043 			ret_val = e1000_flash_cycle_ich8lan(hw,
3044 					       ICH_FLASH_ERASE_COMMAND_TIMEOUT);
3045 			if (ret_val == E1000_SUCCESS)
3046 				break;
3047 
3048 			/*
3049 			 * Check if FCERR is set to 1.  If 1,
3050 			 * clear it and try the whole sequence
3051 			 * a few more times else Done
3052 			 */
3053 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3054 						      ICH_FLASH_HSFSTS);
3055 			if (hsfsts.hsf_status.flcerr == 1)
3056 				/* repeat for some time before giving up */
3057 				continue;
3058 			else if (hsfsts.hsf_status.flcdone == 0)
3059 				return ret_val;
3060 		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3061 	}
3062 
3063 	return E1000_SUCCESS;
3064 }
3065 
3066 /**
3067  *  e1000_valid_led_default_ich8lan - Set the default LED settings
3068  *  @hw: pointer to the HW structure
3069  *  @data: Pointer to the LED settings
3070  *
3071  *  Reads the LED default settings from the NVM to data.  If the NVM LED
3072  *  settings is all 0's or F's, set the LED default to a valid LED default
3073  *  setting.
3074  **/
3075 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3076 {
3077 	s32 ret_val;
3078 
3079 	DEBUGFUNC("e1000_valid_led_default_ich8lan");
3080 
3081 	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3082 	if (ret_val) {
3083 		DEBUGOUT("NVM Read Error\n");
3084 		return ret_val;
3085 	}
3086 
3087 	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
3088 		*data = ID_LED_DEFAULT_ICH8LAN;
3089 
3090 	return E1000_SUCCESS;
3091 }
3092 
3093 /**
3094  *  e1000_id_led_init_pchlan - store LED configurations
3095  *  @hw: pointer to the HW structure
3096  *
3097  *  PCH does not control LEDs via the LEDCTL register, rather it uses
3098  *  the PHY LED configuration register.
3099  *
3100  *  PCH also does not have an "always on" or "always off" mode which
3101  *  complicates the ID feature.  Instead of using the "on" mode to indicate
3102  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
3103  *  use "link_up" mode.  The LEDs will still ID on request if there is no
3104  *  link based on logic in e1000_led_[on|off]_pchlan().
3105  **/
3106 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
3107 {
3108 	struct e1000_mac_info *mac = &hw->mac;
3109 	s32 ret_val;
3110 	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
3111 	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
3112 	u16 data, i, temp, shift;
3113 
3114 	DEBUGFUNC("e1000_id_led_init_pchlan");
3115 
3116 	/* Get default ID LED modes */
3117 	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
3118 	if (ret_val)
3119 		return ret_val;
3120 
3121 	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
3122 	mac->ledctl_mode1 = mac->ledctl_default;
3123 	mac->ledctl_mode2 = mac->ledctl_default;
3124 
3125 	for (i = 0; i < 4; i++) {
3126 		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
3127 		shift = (i * 5);
3128 		switch (temp) {
3129 		case ID_LED_ON1_DEF2:
3130 		case ID_LED_ON1_ON2:
3131 		case ID_LED_ON1_OFF2:
3132 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3133 			mac->ledctl_mode1 |= (ledctl_on << shift);
3134 			break;
3135 		case ID_LED_OFF1_DEF2:
3136 		case ID_LED_OFF1_ON2:
3137 		case ID_LED_OFF1_OFF2:
3138 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3139 			mac->ledctl_mode1 |= (ledctl_off << shift);
3140 			break;
3141 		default:
3142 			/* Do nothing */
3143 			break;
3144 		}
3145 		switch (temp) {
3146 		case ID_LED_DEF1_ON2:
3147 		case ID_LED_ON1_ON2:
3148 		case ID_LED_OFF1_ON2:
3149 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3150 			mac->ledctl_mode2 |= (ledctl_on << shift);
3151 			break;
3152 		case ID_LED_DEF1_OFF2:
3153 		case ID_LED_ON1_OFF2:
3154 		case ID_LED_OFF1_OFF2:
3155 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3156 			mac->ledctl_mode2 |= (ledctl_off << shift);
3157 			break;
3158 		default:
3159 			/* Do nothing */
3160 			break;
3161 		}
3162 	}
3163 
3164 	return E1000_SUCCESS;
3165 }
3166 
3167 /**
3168  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
3169  *  @hw: pointer to the HW structure
3170  *
3171  *  ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
3172  *  register, so the the bus width is hard coded.
3173  **/
3174 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3175 {
3176 	struct e1000_bus_info *bus = &hw->bus;
3177 	s32 ret_val;
3178 
3179 	DEBUGFUNC("e1000_get_bus_info_ich8lan");
3180 
3181 	ret_val = e1000_get_bus_info_pcie_generic(hw);
3182 
3183 	/*
3184 	 * ICH devices are "PCI Express"-ish.  They have
3185 	 * a configuration space, but do not contain
3186 	 * PCI Express Capability registers, so bus width
3187 	 * must be hardcoded.
3188 	 */
3189 	if (bus->width == e1000_bus_width_unknown)
3190 		bus->width = e1000_bus_width_pcie_x1;
3191 
3192 	return ret_val;
3193 }
3194 
3195 /**
3196  *  e1000_reset_hw_ich8lan - Reset the hardware
3197  *  @hw: pointer to the HW structure
3198  *
3199  *  Does a full reset of the hardware which includes a reset of the PHY and
3200  *  MAC.
3201  **/
3202 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3203 {
3204 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3205 	u16 kum_cfg;
3206 	u32 ctrl, reg;
3207 	s32 ret_val;
3208 
3209 	DEBUGFUNC("e1000_reset_hw_ich8lan");
3210 
3211 	/*
3212 	 * Prevent the PCI-E bus from sticking if there is no TLP connection
3213 	 * on the last TLP read/write transaction when MAC is reset.
3214 	 */
3215 	ret_val = e1000_disable_pcie_master_generic(hw);
3216 	if (ret_val)
3217 		DEBUGOUT("PCI-E Master disable polling has failed.\n");
3218 
3219 	DEBUGOUT("Masking off all interrupts\n");
3220 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3221 
3222 	/*
3223 	 * Disable the Transmit and Receive units.  Then delay to allow
3224 	 * any pending transactions to complete before we hit the MAC
3225 	 * with the global reset.
3226 	 */
3227 	E1000_WRITE_REG(hw, E1000_RCTL, 0);
3228 	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
3229 	E1000_WRITE_FLUSH(hw);
3230 
3231 	msec_delay(10);
3232 
3233 	/* Workaround for ICH8 bit corruption issue in FIFO memory */
3234 	if (hw->mac.type == e1000_ich8lan) {
3235 		/* Set Tx and Rx buffer allocation to 8k apiece. */
3236 		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
3237 		/* Set Packet Buffer Size to 16k. */
3238 		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
3239 	}
3240 
3241 	if (hw->mac.type == e1000_pchlan) {
3242 		/* Save the NVM K1 bit setting*/
3243 		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
3244 		if (ret_val)
3245 			return ret_val;
3246 
3247 		if (kum_cfg & E1000_NVM_K1_ENABLE)
3248 			dev_spec->nvm_k1_enabled = TRUE;
3249 		else
3250 			dev_spec->nvm_k1_enabled = FALSE;
3251 	}
3252 
3253 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
3254 
3255 	if (!hw->phy.ops.check_reset_block(hw)) {
3256 		/*
3257 		 * Full-chip reset requires MAC and PHY reset at the same
3258 		 * time to make sure the interface between MAC and the
3259 		 * external PHY is reset.
3260 		 */
3261 		ctrl |= E1000_CTRL_PHY_RST;
3262 
3263 		/*
3264 		 * Gate automatic PHY configuration by hardware on
3265 		 * non-managed 82579
3266 		 */
3267 		if ((hw->mac.type == e1000_pch2lan) &&
3268 		    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3269 			e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3270 	}
3271 	ret_val = e1000_acquire_swflag_ich8lan(hw);
3272 	DEBUGOUT("Issuing a global reset to ich8lan\n");
3273 	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
3274 	/* cannot issue a flush here because it hangs the hardware */
3275 	msec_delay(20);
3276 
3277 	/* Set Phy Config Counter to 50msec */
3278 	if (hw->mac.type == e1000_pch2lan) {
3279 		reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
3280 		reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
3281 		reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
3282 		E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
3283 	}
3284 
3285 	if (ctrl & E1000_CTRL_PHY_RST) {
3286 		ret_val = hw->phy.ops.get_cfg_done(hw);
3287 		if (ret_val)
3288 			return ret_val;
3289 
3290 		ret_val = e1000_post_phy_reset_ich8lan(hw);
3291 		if (ret_val)
3292 			return ret_val;
3293 	}
3294 
3295 	/*
3296 	 * For PCH, this write will make sure that any noise
3297 	 * will be detected as a CRC error and be dropped rather than show up
3298 	 * as a bad packet to the DMA engine.
3299 	 */
3300 	if (hw->mac.type == e1000_pchlan)
3301 		E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
3302 
3303 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3304 	E1000_READ_REG(hw, E1000_ICR);
3305 
3306 	reg = E1000_READ_REG(hw, E1000_KABGTXD);
3307 	reg |= E1000_KABGTXD_BGSQLBIAS;
3308 	E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
3309 
3310 	return E1000_SUCCESS;
3311 }
3312 
3313 /**
3314  *  e1000_init_hw_ich8lan - Initialize the hardware
3315  *  @hw: pointer to the HW structure
3316  *
3317  *  Prepares the hardware for transmit and receive by doing the following:
3318  *   - initialize hardware bits
3319  *   - initialize LED identification
3320  *   - setup receive address registers
3321  *   - setup flow control
3322  *   - setup transmit descriptors
3323  *   - clear statistics
3324  **/
3325 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3326 {
3327 	struct e1000_mac_info *mac = &hw->mac;
3328 	u32 ctrl_ext, txdctl, snoop;
3329 	s32 ret_val;
3330 	u16 i;
3331 
3332 	DEBUGFUNC("e1000_init_hw_ich8lan");
3333 
3334 	e1000_initialize_hw_bits_ich8lan(hw);
3335 
3336 	/* Initialize identification LED */
3337 	ret_val = mac->ops.id_led_init(hw);
3338 	if (ret_val)
3339 		DEBUGOUT("Error initializing identification LED\n");
3340 		/* This is not fatal and we should not stop init due to this */
3341 
3342 	/* Setup the receive address. */
3343 	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
3344 
3345 	/* Zero out the Multicast HASH table */
3346 	DEBUGOUT("Zeroing the MTA\n");
3347 	for (i = 0; i < mac->mta_reg_count; i++)
3348 		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
3349 
3350 	/*
3351 	 * The 82578 Rx buffer will stall if wakeup is enabled in host and
3352 	 * the ME.  Disable wakeup by clearing the host wakeup bit.
3353 	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
3354 	 */
3355 	if (hw->phy.type == e1000_phy_82578) {
3356 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
3357 		i &= ~BM_WUC_HOST_WU_BIT;
3358 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
3359 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
3360 		if (ret_val)
3361 			return ret_val;
3362 	}
3363 
3364 	/* Setup link and flow control */
3365 	ret_val = mac->ops.setup_link(hw);
3366 
3367 	/* Set the transmit descriptor write-back policy for both queues */
3368 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
3369 	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3370 		 E1000_TXDCTL_FULL_TX_DESC_WB;
3371 	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3372 		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3373 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
3374 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
3375 	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3376 		 E1000_TXDCTL_FULL_TX_DESC_WB;
3377 	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3378 		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3379 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
3380 
3381 	/*
3382 	 * ICH8 has opposite polarity of no_snoop bits.
3383 	 * By default, we should use snoop behavior.
3384 	 */
3385 	if (mac->type == e1000_ich8lan)
3386 		snoop = PCIE_ICH8_SNOOP_ALL;
3387 	else
3388 		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
3389 	e1000_set_pcie_no_snoop_generic(hw, snoop);
3390 
3391 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
3392 	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
3393 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
3394 
3395 	/*
3396 	 * Clear all of the statistics registers (clear on read).  It is
3397 	 * important that we do this after we have tried to establish link
3398 	 * because the symbol error count will increment wildly if there
3399 	 * is no link.
3400 	 */
3401 	e1000_clear_hw_cntrs_ich8lan(hw);
3402 
3403 	return ret_val;
3404 }
3405 /**
3406  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
3407  *  @hw: pointer to the HW structure
3408  *
3409  *  Sets/Clears required hardware bits necessary for correctly setting up the
3410  *  hardware for transmit and receive.
3411  **/
3412 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3413 {
3414 	u32 reg;
3415 
3416 	DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
3417 
3418 	/* Extended Device Control */
3419 	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
3420 	reg |= (1 << 22);
3421 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
3422 	if (hw->mac.type >= e1000_pchlan)
3423 		reg |= E1000_CTRL_EXT_PHYPDEN;
3424 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
3425 
3426 	/* Transmit Descriptor Control 0 */
3427 	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
3428 	reg |= (1 << 22);
3429 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
3430 
3431 	/* Transmit Descriptor Control 1 */
3432 	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
3433 	reg |= (1 << 22);
3434 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
3435 
3436 	/* Transmit Arbitration Control 0 */
3437 	reg = E1000_READ_REG(hw, E1000_TARC(0));
3438 	if (hw->mac.type == e1000_ich8lan)
3439 		reg |= (1 << 28) | (1 << 29);
3440 	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
3441 	E1000_WRITE_REG(hw, E1000_TARC(0), reg);
3442 
3443 	/* Transmit Arbitration Control 1 */
3444 	reg = E1000_READ_REG(hw, E1000_TARC(1));
3445 	if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
3446 		reg &= ~(1 << 28);
3447 	else
3448 		reg |= (1 << 28);
3449 	reg |= (1 << 24) | (1 << 26) | (1 << 30);
3450 	E1000_WRITE_REG(hw, E1000_TARC(1), reg);
3451 
3452 	/* Device Status */
3453 	if (hw->mac.type == e1000_ich8lan) {
3454 		reg = E1000_READ_REG(hw, E1000_STATUS);
3455 		reg &= ~(1 << 31);
3456 		E1000_WRITE_REG(hw, E1000_STATUS, reg);
3457 	}
3458 
3459 	/*
3460 	 * work-around descriptor data corruption issue during nfs v2 udp
3461 	 * traffic, just disable the nfs filtering capability
3462 	 */
3463 	reg = E1000_READ_REG(hw, E1000_RFCTL);
3464 	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
3465 	/*
3466 	 * Disable IPv6 extension header parsing because some malformed
3467 	 * IPv6 headers can hang the Rx.
3468 	 */
3469 	if (hw->mac.type == e1000_ich8lan)
3470 		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
3471 	E1000_WRITE_REG(hw, E1000_RFCTL, reg);
3472 
3473 	return;
3474 }
3475 
3476 /**
3477  *  e1000_setup_link_ich8lan - Setup flow control and link settings
3478  *  @hw: pointer to the HW structure
3479  *
3480  *  Determines which flow control settings to use, then configures flow
3481  *  control.  Calls the appropriate media-specific link configuration
3482  *  function.  Assuming the adapter has a valid link partner, a valid link
3483  *  should be established.  Assumes the hardware has previously been reset
3484  *  and the transmitter and receiver are not enabled.
3485  **/
3486 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3487 {
3488 	s32 ret_val;
3489 
3490 	DEBUGFUNC("e1000_setup_link_ich8lan");
3491 
3492 	if (hw->phy.ops.check_reset_block(hw))
3493 		return E1000_SUCCESS;
3494 
3495 	/*
3496 	 * ICH parts do not have a word in the NVM to determine
3497 	 * the default flow control setting, so we explicitly
3498 	 * set it to full.
3499 	 */
3500 	if (hw->fc.requested_mode == e1000_fc_default)
3501 		hw->fc.requested_mode = e1000_fc_full;
3502 
3503 	/*
3504 	 * Save off the requested flow control mode for use later.  Depending
3505 	 * on the link partner's capabilities, we may or may not use this mode.
3506 	 */
3507 	hw->fc.current_mode = hw->fc.requested_mode;
3508 
3509 	DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
3510 		hw->fc.current_mode);
3511 
3512 	/* Continue to configure the copper link. */
3513 	ret_val = hw->mac.ops.setup_physical_interface(hw);
3514 	if (ret_val)
3515 		return ret_val;
3516 
3517 	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
3518 	if ((hw->phy.type == e1000_phy_82578) ||
3519 	    (hw->phy.type == e1000_phy_82579) ||
3520 	    (hw->phy.type == e1000_phy_82577)) {
3521 		E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
3522 
3523 		ret_val = hw->phy.ops.write_reg(hw,
3524 					     PHY_REG(BM_PORT_CTRL_PAGE, 27),
3525 					     hw->fc.pause_time);
3526 		if (ret_val)
3527 			return ret_val;
3528 	}
3529 
3530 	return e1000_set_fc_watermarks_generic(hw);
3531 }
3532 
3533 /**
3534  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
3535  *  @hw: pointer to the HW structure
3536  *
3537  *  Configures the kumeran interface to the PHY to wait the appropriate time
3538  *  when polling the PHY, then call the generic setup_copper_link to finish
3539  *  configuring the copper link.
3540  **/
3541 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3542 {
3543 	u32 ctrl;
3544 	s32 ret_val;
3545 	u16 reg_data;
3546 
3547 	DEBUGFUNC("e1000_setup_copper_link_ich8lan");
3548 
3549 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
3550 	ctrl |= E1000_CTRL_SLU;
3551 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
3552 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3553 
3554 	/*
3555 	 * Set the mac to wait the maximum time between each iteration
3556 	 * and increase the max iterations when polling the phy;
3557 	 * this fixes erroneous timeouts at 10Mbps.
3558 	 */
3559 	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
3560 					       0xFFFF);
3561 	if (ret_val)
3562 		return ret_val;
3563 	ret_val = e1000_read_kmrn_reg_generic(hw,
3564 					      E1000_KMRNCTRLSTA_INBAND_PARAM,
3565 					      &reg_data);
3566 	if (ret_val)
3567 		return ret_val;
3568 	reg_data |= 0x3F;
3569 	ret_val = e1000_write_kmrn_reg_generic(hw,
3570 					       E1000_KMRNCTRLSTA_INBAND_PARAM,
3571 					       reg_data);
3572 	if (ret_val)
3573 		return ret_val;
3574 
3575 	switch (hw->phy.type) {
3576 	case e1000_phy_igp_3:
3577 		ret_val = e1000_copper_link_setup_igp(hw);
3578 		if (ret_val)
3579 			return ret_val;
3580 		break;
3581 	case e1000_phy_bm:
3582 	case e1000_phy_82578:
3583 		ret_val = e1000_copper_link_setup_m88(hw);
3584 		if (ret_val)
3585 			return ret_val;
3586 		break;
3587 	case e1000_phy_82577:
3588 	case e1000_phy_82579:
3589 		ret_val = e1000_copper_link_setup_82577(hw);
3590 		if (ret_val)
3591 			return ret_val;
3592 		break;
3593 	case e1000_phy_ife:
3594 		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
3595 					       &reg_data);
3596 		if (ret_val)
3597 			return ret_val;
3598 
3599 		reg_data &= ~IFE_PMC_AUTO_MDIX;
3600 
3601 		switch (hw->phy.mdix) {
3602 		case 1:
3603 			reg_data &= ~IFE_PMC_FORCE_MDIX;
3604 			break;
3605 		case 2:
3606 			reg_data |= IFE_PMC_FORCE_MDIX;
3607 			break;
3608 		case 0:
3609 		default:
3610 			reg_data |= IFE_PMC_AUTO_MDIX;
3611 			break;
3612 		}
3613 		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
3614 						reg_data);
3615 		if (ret_val)
3616 			return ret_val;
3617 		break;
3618 	default:
3619 		break;
3620 	}
3621 
3622 	return e1000_setup_copper_link_generic(hw);
3623 }
3624 
3625 /**
3626  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
3627  *  @hw: pointer to the HW structure
3628  *  @speed: pointer to store current link speed
3629  *  @duplex: pointer to store the current link duplex
3630  *
3631  *  Calls the generic get_speed_and_duplex to retrieve the current link
3632  *  information and then calls the Kumeran lock loss workaround for links at
3633  *  gigabit speeds.
3634  **/
3635 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
3636 					  u16 *duplex)
3637 {
3638 	s32 ret_val;
3639 
3640 	DEBUGFUNC("e1000_get_link_up_info_ich8lan");
3641 
3642 	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
3643 	if (ret_val)
3644 		return ret_val;
3645 
3646 	if ((hw->mac.type == e1000_ich8lan) &&
3647 	    (hw->phy.type == e1000_phy_igp_3) &&
3648 	    (*speed == SPEED_1000)) {
3649 		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
3650 	}
3651 
3652 	return ret_val;
3653 }
3654 
3655 /**
3656  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
3657  *  @hw: pointer to the HW structure
3658  *
3659  *  Work-around for 82566 Kumeran PCS lock loss:
3660  *  On link status change (i.e. PCI reset, speed change) and link is up and
3661  *  speed is gigabit-
3662  *    0) if workaround is optionally disabled do nothing
3663  *    1) wait 1ms for Kumeran link to come up
3664  *    2) check Kumeran Diagnostic register PCS lock loss bit
3665  *    3) if not set the link is locked (all is good), otherwise...
3666  *    4) reset the PHY
3667  *    5) repeat up to 10 times
3668  *  Note: this is only called for IGP3 copper when speed is 1gb.
3669  **/
3670 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
3671 {
3672 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3673 	u32 phy_ctrl;
3674 	s32 ret_val;
3675 	u16 i, data;
3676 	bool link;
3677 
3678 	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
3679 
3680 	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
3681 		return E1000_SUCCESS;
3682 
3683 	/*
3684 	 * Make sure link is up before proceeding.  If not just return.
3685 	 * Attempting this while link is negotiating fouled up link
3686 	 * stability
3687 	 */
3688 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
3689 	if (!link)
3690 		return E1000_SUCCESS;
3691 
3692 	for (i = 0; i < 10; i++) {
3693 		/* read once to clear */
3694 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3695 		if (ret_val)
3696 			return ret_val;
3697 		/* and again to get new status */
3698 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3699 		if (ret_val)
3700 			return ret_val;
3701 
3702 		/* check for PCS lock */
3703 		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
3704 			return E1000_SUCCESS;
3705 
3706 		/* Issue PHY reset */
3707 		hw->phy.ops.reset(hw);
3708 		msec_delay_irq(5);
3709 	}
3710 	/* Disable GigE link negotiation */
3711 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3712 	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
3713 		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3714 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3715 
3716 	/*
3717 	 * Call gig speed drop workaround on Gig disable before accessing
3718 	 * any PHY registers
3719 	 */
3720 	e1000_gig_downshift_workaround_ich8lan(hw);
3721 
3722 	/* unable to acquire PCS lock */
3723 	return -E1000_ERR_PHY;
3724 }
3725 
3726 /**
3727  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
3728  *  @hw: pointer to the HW structure
3729  *  @state: boolean value used to set the current Kumeran workaround state
3730  *
3731  *  If ICH8, set the current Kumeran workaround state (enabled - TRUE
3732  *  /disabled - FALSE).
3733  **/
3734 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
3735 						 bool state)
3736 {
3737 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3738 
3739 	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
3740 
3741 	if (hw->mac.type != e1000_ich8lan) {
3742 		DEBUGOUT("Workaround applies to ICH8 only.\n");
3743 		return;
3744 	}
3745 
3746 	dev_spec->kmrn_lock_loss_workaround_enabled = state;
3747 
3748 	return;
3749 }
3750 
3751 /**
3752  *  e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
3753  *  @hw: pointer to the HW structure
3754  *
3755  *  Workaround for 82566 power-down on D3 entry:
3756  *    1) disable gigabit link
3757  *    2) write VR power-down enable
3758  *    3) read it back
3759  *  Continue if successful, else issue LCD reset and repeat
3760  **/
3761 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
3762 {
3763 	u32 reg;
3764 	u16 data;
3765 	u8  retry = 0;
3766 
3767 	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
3768 
3769 	if (hw->phy.type != e1000_phy_igp_3)
3770 		return;
3771 
3772 	/* Try the workaround twice (if needed) */
3773 	do {
3774 		/* Disable link */
3775 		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
3776 		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
3777 			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3778 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
3779 
3780 		/*
3781 		 * Call gig speed drop workaround on Gig disable before
3782 		 * accessing any PHY registers
3783 		 */
3784 		if (hw->mac.type == e1000_ich8lan)
3785 			e1000_gig_downshift_workaround_ich8lan(hw);
3786 
3787 		/* Write VR power-down enable */
3788 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3789 		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3790 		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
3791 				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);
3792 
3793 		/* Read it back and test */
3794 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3795 		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3796 		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
3797 			break;
3798 
3799 		/* Issue PHY reset and repeat at most one more time */
3800 		reg = E1000_READ_REG(hw, E1000_CTRL);
3801 		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
3802 		retry++;
3803 	} while (retry);
3804 }
3805 
3806 /**
3807  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
3808  *  @hw: pointer to the HW structure
3809  *
3810  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
3811  *  LPLU, Gig disable, MDIC PHY reset):
3812  *    1) Set Kumeran Near-end loopback
3813  *    2) Clear Kumeran Near-end loopback
3814  *  Should only be called for ICH8[m] devices with any 1G Phy.
3815  **/
3816 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3817 {
3818 	s32 ret_val;
3819 	u16 reg_data;
3820 
3821 	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
3822 
3823 	if ((hw->mac.type != e1000_ich8lan) ||
3824 	    (hw->phy.type == e1000_phy_ife))
3825 		return;
3826 
3827 	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
3828 					      &reg_data);
3829 	if (ret_val)
3830 		return;
3831 	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
3832 	ret_val = e1000_write_kmrn_reg_generic(hw,
3833 					       E1000_KMRNCTRLSTA_DIAG_OFFSET,
3834 					       reg_data);
3835 	if (ret_val)
3836 		return;
3837 	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
3838 	ret_val = e1000_write_kmrn_reg_generic(hw,
3839 					       E1000_KMRNCTRLSTA_DIAG_OFFSET,
3840 					       reg_data);
3841 }
3842 
3843 /**
3844  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
3845  *  @hw: pointer to the HW structure
3846  *
3847  *  During S0 to Sx transition, it is possible the link remains at gig
3848  *  instead of negotiating to a lower speed.  Before going to Sx, set
3849  *  'Gig Disable' to force link speed negotiation to a lower speed based on
3850  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
3851  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
3852  *  needs to be written.
3853  **/
3854 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
3855 {
3856 	u32 phy_ctrl;
3857 	s32 ret_val;
3858 
3859 	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
3860 
3861 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3862 	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
3863 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3864 
3865 	if (hw->mac.type == e1000_ich8lan)
3866 		e1000_gig_downshift_workaround_ich8lan(hw);
3867 
3868 	if (hw->mac.type >= e1000_pchlan) {
3869 		e1000_oem_bits_config_ich8lan(hw, FALSE);
3870 
3871 		/* Reset PHY to activate OEM bits on 82577/8 */
3872 		if (hw->mac.type == e1000_pchlan)
3873 			e1000_phy_hw_reset_ich8lan(hw);
3874 
3875 		ret_val = hw->phy.ops.acquire(hw);
3876 		if (ret_val)
3877 			return;
3878 		e1000_write_smbus_addr(hw);
3879 		hw->phy.ops.release(hw);
3880 	}
3881 
3882 	return;
3883 }
3884 
3885 /**
3886  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
3887  *  @hw: pointer to the HW structure
3888  *
3889  *  During Sx to S0 transitions on non-managed devices or managed devices
3890  *  on which PHY resets are not blocked, if the PHY registers cannot be
3891  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
3892  *  the PHY.
3893  **/
3894 void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
3895 {
3896 	u16 phy_id1, phy_id2;
3897 	s32 ret_val;
3898 
3899 	DEBUGFUNC("e1000_resume_workarounds_pchlan");
3900 
3901 	if (hw->mac.type < e1000_pch2lan)
3902 		return;
3903 
3904 	if (!(hw->phy.ops.check_reset_block(hw))) {
3905 		ret_val = hw->phy.ops.acquire(hw);
3906 		if (ret_val) {
3907 			DEBUGOUT("Failed to acquire PHY semaphore in resume\n");
3908 			return;
3909 		}
3910 
3911 		/* Test access to the PHY registers by reading the ID regs */
3912 		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1);
3913 		if (ret_val)
3914 			goto err_release;
3915 		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2);
3916 		if (ret_val)
3917 			goto err_release;
3918 
3919 		/* Toggle LANPHYPC iff above reads return unexpected values */
3920 		if (hw->phy.id != ((u32)(phy_id1 << 16) |
3921 				   (u32)(phy_id2 & PHY_REVISION_MASK))) {
3922 			e1000_toggle_lanphypc_value_ich8lan(hw);
3923 			hw->phy.ops.release(hw);
3924 			msec_delay(50);
3925 			hw->phy.ops.reset(hw);
3926 			msec_delay(50);
3927 		} else {
3928 			hw->phy.ops.release(hw);
3929 		}
3930 	}
3931 
3932 	return;
3933 
3934 err_release:
3935 	DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
3936 	hw->phy.ops.release(hw);
3937 }
3938 
3939 /**
3940  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
3941  *  @hw: pointer to the HW structure
3942  *
3943  *  Return the LED back to the default configuration.
3944  **/
3945 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
3946 {
3947 	DEBUGFUNC("e1000_cleanup_led_ich8lan");
3948 
3949 	if (hw->phy.type == e1000_phy_ife)
3950 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3951 					     0);
3952 
3953 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
3954 	return E1000_SUCCESS;
3955 }
3956 
3957 /**
3958  *  e1000_led_on_ich8lan - Turn LEDs on
3959  *  @hw: pointer to the HW structure
3960  *
3961  *  Turn on the LEDs.
3962  **/
3963 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
3964 {
3965 	DEBUGFUNC("e1000_led_on_ich8lan");
3966 
3967 	if (hw->phy.type == e1000_phy_ife)
3968 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3969 				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
3970 
3971 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
3972 	return E1000_SUCCESS;
3973 }
3974 
3975 /**
3976  *  e1000_led_off_ich8lan - Turn LEDs off
3977  *  @hw: pointer to the HW structure
3978  *
3979  *  Turn off the LEDs.
3980  **/
3981 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
3982 {
3983 	DEBUGFUNC("e1000_led_off_ich8lan");
3984 
3985 	if (hw->phy.type == e1000_phy_ife)
3986 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3987 			       (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
3988 
3989 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
3990 	return E1000_SUCCESS;
3991 }
3992 
3993 /**
3994  *  e1000_setup_led_pchlan - Configures SW controllable LED
3995  *  @hw: pointer to the HW structure
3996  *
3997  *  This prepares the SW controllable LED for use.
3998  **/
3999 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
4000 {
4001 	DEBUGFUNC("e1000_setup_led_pchlan");
4002 
4003 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
4004 				     (u16)hw->mac.ledctl_mode1);
4005 }
4006 
4007 /**
4008  *  e1000_cleanup_led_pchlan - Restore the default LED operation
4009  *  @hw: pointer to the HW structure
4010  *
4011  *  Return the LED back to the default configuration.
4012  **/
4013 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
4014 {
4015 	DEBUGFUNC("e1000_cleanup_led_pchlan");
4016 
4017 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
4018 				     (u16)hw->mac.ledctl_default);
4019 }
4020 
4021 /**
4022  *  e1000_led_on_pchlan - Turn LEDs on
4023  *  @hw: pointer to the HW structure
4024  *
4025  *  Turn on the LEDs.
4026  **/
4027 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
4028 {
4029 	u16 data = (u16)hw->mac.ledctl_mode2;
4030 	u32 i, led;
4031 
4032 	DEBUGFUNC("e1000_led_on_pchlan");
4033 
4034 	/*
4035 	 * If no link, then turn LED on by setting the invert bit
4036 	 * for each LED that's mode is "link_up" in ledctl_mode2.
4037 	 */
4038 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
4039 		for (i = 0; i < 3; i++) {
4040 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4041 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
4042 			    E1000_LEDCTL_MODE_LINK_UP)
4043 				continue;
4044 			if (led & E1000_PHY_LED0_IVRT)
4045 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4046 			else
4047 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
4048 		}
4049 	}
4050 
4051 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4052 }
4053 
4054 /**
4055  *  e1000_led_off_pchlan - Turn LEDs off
4056  *  @hw: pointer to the HW structure
4057  *
4058  *  Turn off the LEDs.
4059  **/
4060 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
4061 {
4062 	u16 data = (u16)hw->mac.ledctl_mode1;
4063 	u32 i, led;
4064 
4065 	DEBUGFUNC("e1000_led_off_pchlan");
4066 
4067 	/*
4068 	 * If no link, then turn LED off by clearing the invert bit
4069 	 * for each LED that's mode is "link_up" in ledctl_mode1.
4070 	 */
4071 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
4072 		for (i = 0; i < 3; i++) {
4073 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4074 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
4075 			    E1000_LEDCTL_MODE_LINK_UP)
4076 				continue;
4077 			if (led & E1000_PHY_LED0_IVRT)
4078 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4079 			else
4080 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
4081 		}
4082 	}
4083 
4084 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4085 }
4086 
4087 /**
4088  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
4089  *  @hw: pointer to the HW structure
4090  *
4091  *  Read appropriate register for the config done bit for completion status
4092  *  and configure the PHY through s/w for EEPROM-less parts.
4093  *
4094  *  NOTE: some silicon which is EEPROM-less will fail trying to read the
4095  *  config done bit, so only an error is logged and continues.  If we were
4096  *  to return with error, EEPROM-less silicon would not be able to be reset
4097  *  or change link.
4098  **/
4099 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
4100 {
4101 	s32 ret_val = E1000_SUCCESS;
4102 	u32 bank = 0;
4103 	u32 status;
4104 
4105 	DEBUGFUNC("e1000_get_cfg_done_ich8lan");
4106 
4107 	e1000_get_cfg_done_generic(hw);
4108 
4109 	/* Wait for indication from h/w that it has completed basic config */
4110 	if (hw->mac.type >= e1000_ich10lan) {
4111 		e1000_lan_init_done_ich8lan(hw);
4112 	} else {
4113 		ret_val = e1000_get_auto_rd_done_generic(hw);
4114 		if (ret_val) {
4115 			/*
4116 			 * When auto config read does not complete, do not
4117 			 * return with an error. This can happen in situations
4118 			 * where there is no eeprom and prevents getting link.
4119 			 */
4120 			DEBUGOUT("Auto Read Done did not complete\n");
4121 			ret_val = E1000_SUCCESS;
4122 		}
4123 	}
4124 
4125 	/* Clear PHY Reset Asserted bit */
4126 	status = E1000_READ_REG(hw, E1000_STATUS);
4127 	if (status & E1000_STATUS_PHYRA)
4128 		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
4129 	else
4130 		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
4131 
4132 	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
4133 	if (hw->mac.type <= e1000_ich9lan) {
4134 		if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
4135 		    (hw->phy.type == e1000_phy_igp_3)) {
4136 			e1000_phy_init_script_igp3(hw);
4137 		}
4138 	} else {
4139 		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
4140 			/* Maybe we should do a basic PHY config */
4141 			DEBUGOUT("EEPROM not present\n");
4142 			ret_val = -E1000_ERR_CONFIG;
4143 		}
4144 	}
4145 
4146 	return ret_val;
4147 }
4148 
4149 /**
4150  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
4151  * @hw: pointer to the HW structure
4152  *
4153  * In the case of a PHY power down to save power, or to turn off link during a
4154  * driver unload, or wake on lan is not enabled, remove the link.
4155  **/
4156 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
4157 {
4158 	/* If the management interface is not enabled, then power down */
4159 	if (!(hw->mac.ops.check_mng_mode(hw) ||
4160 	      hw->phy.ops.check_reset_block(hw)))
4161 		e1000_power_down_phy_copper(hw);
4162 
4163 	return;
4164 }
4165 
4166 /**
4167  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
4168  *  @hw: pointer to the HW structure
4169  *
4170  *  Clears hardware counters specific to the silicon family and calls
4171  *  clear_hw_cntrs_generic to clear all general purpose counters.
4172  **/
4173 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
4174 {
4175 	u16 phy_data;
4176 	s32 ret_val;
4177 
4178 	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
4179 
4180 	e1000_clear_hw_cntrs_base_generic(hw);
4181 
4182 	E1000_READ_REG(hw, E1000_ALGNERRC);
4183 	E1000_READ_REG(hw, E1000_RXERRC);
4184 	E1000_READ_REG(hw, E1000_TNCRS);
4185 	E1000_READ_REG(hw, E1000_CEXTERR);
4186 	E1000_READ_REG(hw, E1000_TSCTC);
4187 	E1000_READ_REG(hw, E1000_TSCTFC);
4188 
4189 	E1000_READ_REG(hw, E1000_MGTPRC);
4190 	E1000_READ_REG(hw, E1000_MGTPDC);
4191 	E1000_READ_REG(hw, E1000_MGTPTC);
4192 
4193 	E1000_READ_REG(hw, E1000_IAC);
4194 	E1000_READ_REG(hw, E1000_ICRXOC);
4195 
4196 	/* Clear PHY statistics registers */
4197 	if ((hw->phy.type == e1000_phy_82578) ||
4198 	    (hw->phy.type == e1000_phy_82579) ||
4199 	    (hw->phy.type == e1000_phy_82577)) {
4200 		ret_val = hw->phy.ops.acquire(hw);
4201 		if (ret_val)
4202 			return;
4203 		ret_val = hw->phy.ops.set_page(hw,
4204 					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
4205 		if (ret_val)
4206 			goto release;
4207 		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4208 		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
4209 		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4210 		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
4211 		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4212 		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
4213 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4214 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
4215 		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4216 		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
4217 		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4218 		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
4219 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4220 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
4221 release:
4222 		hw->phy.ops.release(hw);
4223 	}
4224 }
4225 
4226