xref: /dragonfly/sys/dev/netif/ig_hal/e1000_ich8lan.c (revision e96fb831)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2009, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD: $*/
34 
35 /*
36  * 82562G 10/100 Network Connection
37  * 82562G-2 10/100 Network Connection
38  * 82562GT 10/100 Network Connection
39  * 82562GT-2 10/100 Network Connection
40  * 82562V 10/100 Network Connection
41  * 82562V-2 10/100 Network Connection
42  * 82566DC-2 Gigabit Network Connection
43  * 82566DC Gigabit Network Connection
44  * 82566DM-2 Gigabit Network Connection
45  * 82566DM Gigabit Network Connection
46  * 82566MC Gigabit Network Connection
47  * 82566MM Gigabit Network Connection
48  * 82567LM Gigabit Network Connection
49  * 82567LF Gigabit Network Connection
50  * 82567V Gigabit Network Connection
51  * 82567LM-2 Gigabit Network Connection
52  * 82567LF-2 Gigabit Network Connection
53  * 82567V-2 Gigabit Network Connection
54  * 82567LF-3 Gigabit Network Connection
55  * 82567LM-3 Gigabit Network Connection
56  * 82567LM-4 Gigabit Network Connection
57  * 82577LM Gigabit Network Connection
58  * 82577LC Gigabit Network Connection
59  * 82578DM Gigabit Network Connection
60  * 82578DC Gigabit Network Connection
61  * 82579LM Gigabit Network Connection
62  * 82579V Gigabit Network Connection
63  */
64 
65 #include "e1000_api.h"
66 
67 static s32  e1000_init_phy_params_ich8lan(struct e1000_hw *hw);
68 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw);
69 static s32  e1000_init_nvm_params_ich8lan(struct e1000_hw *hw);
70 static s32  e1000_init_mac_params_ich8lan(struct e1000_hw *hw);
71 static s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
72 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
73 static s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
74 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
75 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
76 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
77 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
78 static s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
79 static s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
80 static s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
81 static s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
82                                             bool active);
83 static s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
84                                             bool active);
85 static s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
86                                    u16 words, u16 *data);
87 static s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
88                                     u16 words, u16 *data);
89 static s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
90 static s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
91 static s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
92                                             u16 *data);
93 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
94 static s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
95 static s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
96 static s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
97 static s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
98 static s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
99 static s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
100                                            u16 *speed, u16 *duplex);
101 static s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
102 static s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
103 static s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
104 static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
105 static s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
106 static s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
107 static s32  e1000_led_on_pchlan(struct e1000_hw *hw);
108 static s32  e1000_led_off_pchlan(struct e1000_hw *hw);
109 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
110 static s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
111 static s32  e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout);
112 static s32  e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw);
113 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
114 static s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
115 static s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
116                                           u32 offset, u8 *data);
117 static s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
118                                           u8 size, u16 *data);
119 static s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
120                                           u32 offset, u16 *data);
121 static s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
122                                                  u32 offset, u8 byte);
123 static s32  e1000_write_flash_byte_ich8lan(struct e1000_hw *hw,
124                                            u32 offset, u8 data);
125 static s32  e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
126                                            u8 size, u16 data);
127 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
128 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
129 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
130 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
131 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
132 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
133 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
134 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
135 
136 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
137 /* Offset 04h HSFSTS */
138 union ich8_hws_flash_status {
139 	struct ich8_hsfsts {
140 		u16 flcdone    :1; /* bit 0 Flash Cycle Done */
141 		u16 flcerr     :1; /* bit 1 Flash Cycle Error */
142 		u16 dael       :1; /* bit 2 Direct Access error Log */
143 		u16 berasesz   :2; /* bit 4:3 Sector Erase Size */
144 		u16 flcinprog  :1; /* bit 5 flash cycle in Progress */
145 		u16 reserved1  :2; /* bit 7:6 Reserved */
146 		u16 reserved2  :6; /* bit 13:8 Reserved */
147 		u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
148 		u16 flockdn    :1; /* bit 15 Flash Config Lock-Down */
149 	} hsf_status;
150 	u16 regval;
151 };
152 
153 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
154 /* Offset 06h FLCTL */
155 union ich8_hws_flash_ctrl {
156 	struct ich8_hsflctl {
157 		u16 flcgo      :1;   /* 0 Flash Cycle Go */
158 		u16 flcycle    :2;   /* 2:1 Flash Cycle */
159 		u16 reserved   :5;   /* 7:3 Reserved  */
160 		u16 fldbcount  :2;   /* 9:8 Flash Data Byte Count */
161 		u16 flockdn    :6;   /* 15:10 Reserved */
162 	} hsf_ctrl;
163 	u16 regval;
164 };
165 
166 /* ICH Flash Region Access Permissions */
167 union ich8_hws_flash_regacc {
168 	struct ich8_flracc {
169 		u32 grra      :8; /* 0:7 GbE region Read Access */
170 		u32 grwa      :8; /* 8:15 GbE region Write Access */
171 		u32 gmrag     :8; /* 23:16 GbE Master Read Access Grant */
172 		u32 gmwag     :8; /* 31:24 GbE Master Write Access Grant */
173 	} hsf_flregacc;
174 	u16 regval;
175 };
176 
177 /**
178  *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
179  *  @hw: pointer to the HW structure
180  *
181  *  Initialize family-specific PHY parameters and function pointers.
182  **/
183 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
184 {
185 	struct e1000_phy_info *phy = &hw->phy;
186 	u32 ctrl, fwsm;
187 	s32 ret_val = E1000_SUCCESS;
188 
189 	DEBUGFUNC("e1000_init_phy_params_pchlan");
190 
191 	phy->addr                     = 1;
192 	phy->reset_delay_us           = 100;
193 
194 	phy->ops.acquire              = e1000_acquire_swflag_ich8lan;
195 	phy->ops.check_reset_block    = e1000_check_reset_block_ich8lan;
196 	phy->ops.get_cfg_done         = e1000_get_cfg_done_ich8lan;
197 	phy->ops.read_reg             = e1000_read_phy_reg_hv;
198 	phy->ops.read_reg_locked      = e1000_read_phy_reg_hv_locked;
199 	phy->ops.release              = e1000_release_swflag_ich8lan;
200 	phy->ops.reset                = e1000_phy_hw_reset_ich8lan;
201 	phy->ops.set_d0_lplu_state    = e1000_set_lplu_state_pchlan;
202 	phy->ops.set_d3_lplu_state    = e1000_set_lplu_state_pchlan;
203 	phy->ops.write_reg            = e1000_write_phy_reg_hv;
204 	phy->ops.write_reg_locked     = e1000_write_phy_reg_hv_locked;
205 	phy->ops.power_up             = e1000_power_up_phy_copper;
206 	phy->ops.power_down           = e1000_power_down_phy_copper_ich8lan;
207 	phy->autoneg_mask             = AUTONEG_ADVERTISE_SPEED_DEFAULT;
208 
209 	/*
210 	 * The MAC-PHY interconnect may still be in SMBus mode
211 	 * after Sx->S0.  If the manageability engine (ME) is
212 	 * disabled, then toggle the LANPHYPC Value bit to force
213 	 * the interconnect to PCIe mode.
214 	 */
215 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
216 	if (!(fwsm & E1000_ICH_FWSM_FW_VALID) &&
217 	    !(hw->phy.ops.check_reset_block(hw))) {
218 		ctrl = E1000_READ_REG(hw, E1000_CTRL);
219 		ctrl |=  E1000_CTRL_LANPHYPC_OVERRIDE;
220 		ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
221 		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
222 		usec_delay(10);
223 		ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
224 		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
225 		msec_delay(50);
226 
227 		/*
228 		 * Gate automatic PHY configuration by hardware on
229 		 * non-managed 82579
230 		 */
231 		if (hw->mac.type == e1000_pch2lan)
232 			e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
233 	}
234 
235 	/*
236 	 * Reset the PHY before any acccess to it.  Doing so, ensures that
237 	 * the PHY is in a known good state before we read/write PHY registers.
238 	 * The generic reset is sufficient here, because we haven't determined
239 	 * the PHY type yet.
240 	 */
241 	ret_val = e1000_phy_hw_reset_generic(hw);
242 	if (ret_val)
243 		goto out;
244 
245 	/* Ungate automatic PHY configuration on non-managed 82579 */
246 	if ((hw->mac.type == e1000_pch2lan)  &&
247 	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
248 		msec_delay(10);
249 		e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
250 	}
251 
252 	phy->id = e1000_phy_unknown;
253 	switch (hw->mac.type) {
254 	default:
255 		ret_val = e1000_get_phy_id(hw);
256 		if (ret_val)
257 			goto out;
258 		if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
259 			break;
260 		/* fall-through */
261 	case e1000_pch2lan:
262 		/*
263 		 * In case the PHY needs to be in mdio slow mode,
264 		 * set slow mode and try to get the PHY id again.
265 		 */
266 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
267 		if (ret_val)
268 			goto out;
269 		ret_val = e1000_get_phy_id(hw);
270 		if (ret_val)
271 			goto out;
272 		break;
273 	}
274 	phy->type = e1000_get_phy_type_from_id(phy->id);
275 
276 	switch (phy->type) {
277 	case e1000_phy_82577:
278 	case e1000_phy_82579:
279 		phy->ops.check_polarity = e1000_check_polarity_82577;
280 		phy->ops.force_speed_duplex =
281 			e1000_phy_force_speed_duplex_82577;
282 		phy->ops.get_cable_length = e1000_get_cable_length_82577;
283 		phy->ops.get_info = e1000_get_phy_info_82577;
284 		phy->ops.commit = e1000_phy_sw_reset_generic;
285 		break;
286 	case e1000_phy_82578:
287 		phy->ops.check_polarity = e1000_check_polarity_m88;
288 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
289 		phy->ops.get_cable_length = e1000_get_cable_length_m88;
290 		phy->ops.get_info = e1000_get_phy_info_m88;
291 		break;
292 	default:
293 		ret_val = -E1000_ERR_PHY;
294 		break;
295 	}
296 
297 out:
298 	return ret_val;
299 }
300 
301 /**
302  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
303  *  @hw: pointer to the HW structure
304  *
305  *  Initialize family-specific PHY parameters and function pointers.
306  **/
307 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
308 {
309 	struct e1000_phy_info *phy = &hw->phy;
310 	s32 ret_val = E1000_SUCCESS;
311 	u16 i = 0;
312 
313 	DEBUGFUNC("e1000_init_phy_params_ich8lan");
314 
315 	phy->addr                     = 1;
316 	phy->reset_delay_us           = 100;
317 
318 	phy->ops.acquire              = e1000_acquire_swflag_ich8lan;
319 	phy->ops.check_reset_block    = e1000_check_reset_block_ich8lan;
320 	phy->ops.get_cable_length     = e1000_get_cable_length_igp_2;
321 	phy->ops.get_cfg_done         = e1000_get_cfg_done_ich8lan;
322 	phy->ops.read_reg             = e1000_read_phy_reg_igp;
323 	phy->ops.release              = e1000_release_swflag_ich8lan;
324 	phy->ops.reset                = e1000_phy_hw_reset_ich8lan;
325 	phy->ops.set_d0_lplu_state    = e1000_set_d0_lplu_state_ich8lan;
326 	phy->ops.set_d3_lplu_state    = e1000_set_d3_lplu_state_ich8lan;
327 	phy->ops.write_reg            = e1000_write_phy_reg_igp;
328 	phy->ops.power_up             = e1000_power_up_phy_copper;
329 	phy->ops.power_down           = e1000_power_down_phy_copper_ich8lan;
330 
331 	/*
332 	 * We may need to do this twice - once for IGP and if that fails,
333 	 * we'll set BM func pointers and try again
334 	 */
335 	ret_val = e1000_determine_phy_address(hw);
336 	if (ret_val) {
337 		phy->ops.write_reg = e1000_write_phy_reg_bm;
338 		phy->ops.read_reg  = e1000_read_phy_reg_bm;
339 		ret_val = e1000_determine_phy_address(hw);
340 		if (ret_val) {
341 			DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
342 			goto out;
343 		}
344 	}
345 
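	/*
	 * Retry reading the PHY ID (1 ms between attempts, up to 100 tries)
	 * until it maps to a known PHY type.
	 */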
346 	phy->id = 0;
347 	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
348 	       (i++ < 100)) {
349 		msec_delay(1);
350 		ret_val = e1000_get_phy_id(hw);
351 		if (ret_val)
352 			goto out;
353 	}
354 
355 	/* Verify phy id */
356 	switch (phy->id) {
357 	case IGP03E1000_E_PHY_ID:
358 		phy->type = e1000_phy_igp_3;
359 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
360 		phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
361 		phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
362 		phy->ops.get_info = e1000_get_phy_info_igp;
363 		phy->ops.check_polarity = e1000_check_polarity_igp;
364 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
365 		break;
366 	case IFE_E_PHY_ID:
367 	case IFE_PLUS_E_PHY_ID:
368 	case IFE_C_E_PHY_ID:
369 		phy->type = e1000_phy_ife;
370 		phy->autoneg_mask = E1000_ALL_NOT_GIG;
371 		phy->ops.get_info = e1000_get_phy_info_ife;
372 		phy->ops.check_polarity = e1000_check_polarity_ife;
373 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
374 		break;
375 	case BME1000_E_PHY_ID:
376 		phy->type = e1000_phy_bm;
377 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
378 		phy->ops.read_reg = e1000_read_phy_reg_bm;
379 		phy->ops.write_reg = e1000_write_phy_reg_bm;
380 		phy->ops.commit = e1000_phy_sw_reset_generic;
381 		phy->ops.get_info = e1000_get_phy_info_m88;
382 		phy->ops.check_polarity = e1000_check_polarity_m88;
383 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
384 		break;
385 	default:
386 		ret_val = -E1000_ERR_PHY;
387 		goto out;
388 	}
389 
390 out:
391 	return ret_val;
392 }
393 
394 /**
395  *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
396  *  @hw: pointer to the HW structure
397  *
398  *  Initialize family-specific NVM parameters and function
399  *  pointers.
400  **/
401 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
402 {
403 	struct e1000_nvm_info *nvm = &hw->nvm;
404 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
405 	u32 gfpreg, sector_base_addr, sector_end_addr;
406 	s32 ret_val = E1000_SUCCESS;
407 	u16 i;
408 
409 	DEBUGFUNC("e1000_init_nvm_params_ich8lan");
410 
411 	/* Can't read flash registers if the register set isn't mapped. */
412 	if (!hw->flash_address) {
413 		DEBUGOUT("ERROR: Flash registers not mapped\n");
414 		ret_val = -E1000_ERR_CONFIG;
415 		goto out;
416 	}
417 
418 	nvm->type = e1000_nvm_flash_sw;
419 
420 	gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
421 
422 	/*
423 	 * sector_X_addr is a "sector"-aligned address (4096 bytes).
424 	 * Add 1 to sector_end_addr since this sector is included in
425 	 * the overall size.
426 	 */
427 	sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
428 	sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
429 
430 	/* flash_base_addr is byte-aligned */
431 	nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
432 
433 	/*
434 	 * find total size of the NVM, then cut in half since the total
435 	 * size represents two separate NVM banks.
436 	 */
437 	nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
438 	                          << FLASH_SECTOR_ADDR_SHIFT;
439 	nvm->flash_bank_size /= 2;
440 	/* Adjust to word count */
441 	nvm->flash_bank_size /= sizeof(u16);
442 
443 	nvm->word_size = E1000_SHADOW_RAM_WORDS;
444 
445 	/* Clear shadow ram */
446 	for (i = 0; i < nvm->word_size; i++) {
447 		dev_spec->shadow_ram[i].modified = FALSE;
448 		dev_spec->shadow_ram[i].value    = 0xFFFF;
449 	}
450 
451 	/* Function Pointers */
452 	nvm->ops.acquire       = e1000_acquire_nvm_ich8lan;
453 	nvm->ops.release       = e1000_release_nvm_ich8lan;
454 	nvm->ops.read          = e1000_read_nvm_ich8lan;
455 	nvm->ops.update        = e1000_update_nvm_checksum_ich8lan;
456 	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
457 	nvm->ops.validate      = e1000_validate_nvm_checksum_ich8lan;
458 	nvm->ops.write         = e1000_write_nvm_ich8lan;
459 
460 out:
461 	return ret_val;
462 }
463 
464 /**
465  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
466  *  @hw: pointer to the HW structure
467  *
468  *  Initialize family-specific MAC parameters and function
469  *  pointers.
470  **/
471 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
472 {
473 	struct e1000_mac_info *mac = &hw->mac;
474 	u16 pci_cfg;
475 
476 	DEBUGFUNC("e1000_init_mac_params_ich8lan");
477 
478 	/* Set media type function pointer */
479 	hw->phy.media_type = e1000_media_type_copper;
480 
481 	/* Set mta register count */
482 	mac->mta_reg_count = 32;
483 	/* Set rar entry count */
484 	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
485 	if (mac->type == e1000_ich8lan)
486 		mac->rar_entry_count--;
487 	/* Set if part includes ASF firmware */
488 	mac->asf_firmware_present = TRUE;
489 	/* FWSM register */
490 	mac->has_fwsm = TRUE;
491 	/* ARC subsystem not supported */
492 	mac->arc_subsystem_valid = FALSE;
493 	/* Adaptive IFS supported */
494 	mac->adaptive_ifs = TRUE;
495 
496 	/* Function pointers */
497 
498 	/* bus type/speed/width */
499 	mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
500 	/* function id */
501 	mac->ops.set_lan_id = e1000_set_lan_id_single_port;
502 	/* reset */
503 	mac->ops.reset_hw = e1000_reset_hw_ich8lan;
504 	/* hw initialization */
505 	mac->ops.init_hw = e1000_init_hw_ich8lan;
506 	/* link setup */
507 	mac->ops.setup_link = e1000_setup_link_ich8lan;
508 	/* physical interface setup */
509 	mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
510 	/* check for link */
511 	mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
512 	/* link info */
513 	mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
514 	/* multicast address update */
515 	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
516 	/* clear hardware counters */
517 	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
518 
519 	/* LED operations */
520 	switch (mac->type) {
521 	case e1000_ich8lan:
522 	case e1000_ich9lan:
523 	case e1000_ich10lan:
524 		/* check management mode */
525 		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
526 		/* ID LED init */
527 		mac->ops.id_led_init = e1000_id_led_init_generic;
528 		/* blink LED */
529 		mac->ops.blink_led = e1000_blink_led_generic;
530 		/* setup LED */
531 		mac->ops.setup_led = e1000_setup_led_generic;
532 		/* cleanup LED */
533 		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
534 		/* turn on/off LED */
535 		mac->ops.led_on = e1000_led_on_ich8lan;
536 		mac->ops.led_off = e1000_led_off_ich8lan;
537 		break;
538 	case e1000_pch2lan:
539 		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
540 		mac->ops.rar_set = e1000_rar_set_pch2lan;
541 		/* fall-through */
542 	case e1000_pchlan:
543 		/* save PCH revision_id */
544 		e1000_read_pci_cfg(hw, 0x2, &pci_cfg);
545 		hw->revision_id = (u8)(pci_cfg &= 0x000F);
546 		/* check management mode */
547 		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
548 		/* ID LED init */
549 		mac->ops.id_led_init = e1000_id_led_init_pchlan;
550 		/* setup LED */
551 		mac->ops.setup_led = e1000_setup_led_pchlan;
552 		/* cleanup LED */
553 		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
554 		/* turn on/off LED */
555 		mac->ops.led_on = e1000_led_on_pchlan;
556 		mac->ops.led_off = e1000_led_off_pchlan;
557 		break;
558 	default:
559 		break;
560 	}
561 
562 	/* Enable PCS Lock-loss workaround for ICH8 */
563 	if (mac->type == e1000_ich8lan)
564 		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
565 
566 	/* Gate automatic PHY configuration by hardware on managed 82579 */
567 	if ((mac->type == e1000_pch2lan) &&
568 	    (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
569 		e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
570 
571 	return E1000_SUCCESS;
572 }
573 
574 /**
575  *  e1000_set_eee_pchlan - Enable/disable EEE support
576  *  @hw: pointer to the HW structure
577  *
578  *  Enable/disable EEE based on setting in dev_spec structure.  The bits in
579  *  the LPI Control register will remain set only if/when link is up.
580  **/
581 static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
582 {
583 	s32 ret_val = E1000_SUCCESS;
584 	u16 phy_reg;
585 
586 	DEBUGFUNC("e1000_set_eee_pchlan");
587 
588 	if (hw->phy.type != e1000_phy_82579)
589 		goto out;
590 
591 	ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
592 	if (ret_val)
593 		goto out;
594 
595 	if (hw->dev_spec.ich8lan.eee_disable)
596 		phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
597 	else
598 		phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
599 
600 	ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
601 out:
602 	return ret_val;
603 }
604 
605 /**
606  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
607  *  @hw: pointer to the HW structure
608  *
609  *  Checks to see if the link status of the hardware has changed.  If a
610  *  change in link status has been detected, then we read the PHY registers
611  *  to get the current speed/duplex if link exists.
612  **/
613 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
614 {
615 	struct e1000_mac_info *mac = &hw->mac;
616 	s32 ret_val;
617 	bool link;
618 
619 	DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
620 
621 	/*
622 	 * We only want to go out to the PHY registers to see if Auto-Neg
623 	 * has completed and/or if our link status has changed.  The
624 	 * get_link_status flag is set upon receiving a Link Status
625 	 * Change or Rx Sequence Error interrupt.
626 	 */
627 	if (!mac->get_link_status) {
628 		ret_val = E1000_SUCCESS;
629 		goto out;
630 	}
631 
632 	/*
633 	 * First we want to see if the MII Status Register reports
634 	 * link.  If so, then we want to get the current speed/duplex
635 	 * of the PHY.
636 	 */
637 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
638 	if (ret_val)
639 		goto out;
640 
641 	if (hw->mac.type == e1000_pchlan) {
642 		ret_val = e1000_k1_gig_workaround_hv(hw, link);
643 		if (ret_val)
644 			goto out;
645 	}
646 
647 	if (!link)
648 		goto out; /* No link detected */
649 
650 	mac->get_link_status = FALSE;
651 
652 	if (hw->phy.type == e1000_phy_82578) {
653 		ret_val = e1000_link_stall_workaround_hv(hw);
654 		if (ret_val)
655 			goto out;
656 	}
657 
658 	if (hw->mac.type == e1000_pch2lan) {
659 		ret_val = e1000_k1_workaround_lv(hw);
660 		if (ret_val)
661 			goto out;
662 	}
663 
664 	/*
665 	 * Check if there was DownShift, must be checked
666 	 * immediately after link-up
667 	 */
668 	e1000_check_downshift_generic(hw);
669 
670 	/* Enable/Disable EEE after link up */
671 	ret_val = e1000_set_eee_pchlan(hw);
672 	if (ret_val)
673 		goto out;
674 
675 	/*
676 	 * If we are forcing speed/duplex, then we simply return since
677 	 * we have already determined whether we have link or not.
678 	 */
679 	if (!mac->autoneg) {
680 		ret_val = -E1000_ERR_CONFIG;
681 		goto out;
682 	}
683 
684 	/*
685 	 * Auto-Neg is enabled.  Auto Speed Detection takes care
686 	 * of MAC speed/duplex configuration.  So we only need to
687 	 * configure Collision Distance in the MAC.
688 	 */
689 	e1000_config_collision_dist_generic(hw);
690 
691 	/*
692 	 * Configure Flow Control now that Auto-Neg has completed.
693 	 * First, we need to restore the desired flow control
694 	 * settings because we may have had to re-autoneg with a
695 	 * different link partner.
696 	 */
697 	ret_val = e1000_config_fc_after_link_up_generic(hw);
698 	if (ret_val)
699 		DEBUGOUT("Error configuring flow control\n");
700 
701 out:
702 	return ret_val;
703 }
704 
705 /**
706  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
707  *  @hw: pointer to the HW structure
708  *
709  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
710  **/
711 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
712 {
713 	DEBUGFUNC("e1000_init_function_pointers_ich8lan");
714 
715 	hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
716 	hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
717 	switch (hw->mac.type) {
718 	case e1000_ich8lan:
719 	case e1000_ich9lan:
720 	case e1000_ich10lan:
721 		hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
722 		break;
723 	case e1000_pchlan:
724 	case e1000_pch2lan:
725 		hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
726 		break;
727 	default:
728 		break;
729 	}
730 }
731 
732 /**
733  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
734  *  @hw: pointer to the HW structure
735  *
736  *  Acquires the mutex for performing NVM operations.
737  **/
738 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
739 {
740 	DEBUGFUNC("e1000_acquire_nvm_ich8lan");
741 	return E1000_SUCCESS;
742 }
743 
744 /**
745  *  e1000_release_nvm_ich8lan - Release NVM mutex
746  *  @hw: pointer to the HW structure
747  *
748  *  Releases the mutex used while performing NVM operations.
749  **/
750 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
751 {
752 	DEBUGFUNC("e1000_release_nvm_ich8lan");
753 	return;
754 }
755 
756 /**
757  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
758  *  @hw: pointer to the HW structure
759  *
760  *  Acquires the software control flag for performing PHY and select
761  *  MAC CSR accesses.
762  **/
763 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
764 {
765 	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
766 	s32 ret_val = E1000_SUCCESS;
767 
768 	DEBUGFUNC("e1000_acquire_swflag_ich8lan");
769 
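	/* Wait for the software flag to be released by its current owner */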
770 	while (timeout) {
771 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
772 		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
773 			break;
774 
775 		msec_delay_irq(1);
776 		timeout--;
777 	}
778 
779 	if (!timeout) {
780 		DEBUGOUT("SW/FW/HW has locked the resource for too long.\n");
781 		ret_val = -E1000_ERR_CONFIG;
782 		goto out;
783 	}
784 
785 	timeout = SW_FLAG_TIMEOUT;
786 
787 	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
788 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
789 
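	/* Read the flag back to confirm the hardware latched the write */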
790 	while (timeout) {
791 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
792 		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
793 			break;
794 
795 		msec_delay_irq(1);
796 		timeout--;
797 	}
798 
799 	if (!timeout) {
800 		DEBUGOUT("Failed to acquire the semaphore.\n");
801 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
802 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
803 		ret_val = -E1000_ERR_CONFIG;
804 		goto out;
805 	}
806 
807 out:
808 	return ret_val;
809 }
810 
811 /**
812  *  e1000_release_swflag_ich8lan - Release software control flag
813  *  @hw: pointer to the HW structure
814  *
815  *  Releases the software control flag for performing PHY and select
816  *  MAC CSR accesses.
817  **/
818 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
819 {
820 	u32 extcnf_ctrl;
821 
822 	DEBUGFUNC("e1000_release_swflag_ich8lan");
823 
824 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
825 	extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
826 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
827 
828 	return;
829 }
830 
831 /**
832  *  e1000_check_mng_mode_ich8lan - Checks management mode
833  *  @hw: pointer to the HW structure
834  *
835  *  This checks if the adapter has any manageability enabled.
836  *  This is a function pointer entry point only called by read/write
837  *  routines for the PHY and NVM parts.
838  **/
839 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
840 {
841 	u32 fwsm;
842 
843 	DEBUGFUNC("e1000_check_mng_mode_ich8lan");
844 
845 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
846 
847 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
848 	       ((fwsm & E1000_FWSM_MODE_MASK) ==
849 		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
850 }
851 
852 /**
853  *  e1000_check_mng_mode_pchlan - Checks management mode
854  *  @hw: pointer to the HW structure
855  *
856  *  This checks if the adapter has iAMT enabled.
857  *  This is a function pointer entry point only called by read/write
858  *  routines for the PHY and NVM parts.
859  **/
860 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
861 {
862 	u32 fwsm;
863 
864 	DEBUGFUNC("e1000_check_mng_mode_pchlan");
865 
866 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
867 
868 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
869 	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
870 }
871 
872 /**
873  *  e1000_rar_set_pch2lan - Set receive address register
874  *  @hw: pointer to the HW structure
875  *  @addr: pointer to the receive address
876  *  @index: receive address array register
877  *
878  *  Sets the receive address array register at index to the address passed
879  *  in by addr.  For 82579, RAR[0] is the base address register that is to
880  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
881  *  Use SHRA[0-3] in place of those reserved for ME.
882  **/
883 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
884 {
885 	u32 rar_low, rar_high;
886 
887 	DEBUGFUNC("e1000_rar_set_pch2lan");
888 
889 	/*
890 	 * HW expects these in little endian so we reverse the byte order
891 	 * from network order (big endian) to little endian
892 	 */
893 	rar_low = ((u32) addr[0] |
894 	           ((u32) addr[1] << 8) |
895 	           ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
896 
897 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
898 
899 	/* If MAC address zero, no need to set the AV bit */
900 	if (rar_low || rar_high)
901 		rar_high |= E1000_RAH_AV;
902 
903 	if (index == 0) {
904 		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
905 		E1000_WRITE_FLUSH(hw);
906 		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
907 		E1000_WRITE_FLUSH(hw);
908 		return;
909 	}
910 
911 	if (index < hw->mac.rar_entry_count) {
912 		E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
913 		E1000_WRITE_FLUSH(hw);
914 		E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
915 		E1000_WRITE_FLUSH(hw);
916 
917 		/* verify the register updates */
918 		if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
919 		    (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
920 			return;
921 
922 		DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
923 			 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
924 	}
925 
926 	DEBUGOUT1("Failed to write receive address at index %d\n", index);
927 }
928 
929 /**
930  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
931  *  @hw: pointer to the HW structure
932  *
933  *  Checks if firmware is blocking the reset of the PHY.
934  *  This is a function pointer entry point only called by
935  *  reset routines.
936  **/
937 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
938 {
939 	u32 fwsm;
940 
941 	DEBUGFUNC("e1000_check_reset_block_ich8lan");
942 
943 	if (hw->phy.reset_disable)
944 		return E1000_BLK_PHY_RESET;
945 
946 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
947 
948 	return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
949 	                                        : E1000_BLK_PHY_RESET;
950 }
951 
952 /**
953  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
954  *  @hw: pointer to the HW structure
955  *
956  *  Assumes semaphore already acquired.
957  *
958  **/
959 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
960 {
961 	u16 phy_data;
962 	u32 strap = E1000_READ_REG(hw, E1000_STRAP);
963 	s32 ret_val = E1000_SUCCESS;
964 
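	/*
	 * Copy the SMBus address strapping from the MAC STRAP register into
	 * the PHY's HV_SMB_ADDR register and mark it valid.
	 */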
965 	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
966 
967 	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
968 	if (ret_val)
969 		goto out;
970 
971 	phy_data &= ~HV_SMB_ADDR_MASK;
972 	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
973 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
974 	ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
975 
976 out:
977 	return ret_val;
978 }
979 
980 /**
981  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
982  *  @hw:   pointer to the HW structure
983  *
984  *  SW should configure the LCD from the NVM extended configuration region
985  *  as a workaround for certain parts.
986  **/
987 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
988 {
989 	struct e1000_phy_info *phy = &hw->phy;
990 	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
991 	s32 ret_val = E1000_SUCCESS;
992 	u16 word_addr, reg_data, reg_addr, phy_page = 0;
993 
994 	DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
995 
996 	/*
997 	 * Initialize the PHY from the NVM on ICH platforms.  This
998 	 * is needed due to an issue where the NVM configuration is
999 	 * not properly autoloaded after power transitions.
1000 	 * Therefore, after each PHY reset, we will load the
1001 	 * configuration data out of the NVM manually.
1002 	 */
1003 	switch (hw->mac.type) {
1004 	case e1000_ich8lan:
1005 		if (phy->type != e1000_phy_igp_3)
1006 			return ret_val;
1007 
1008 		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
1009 		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
1010 			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
1011 			break;
1012 		}
1013 		/* Fall-thru */
1014 	case e1000_pchlan:
1015 	case e1000_pch2lan:
1016 		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
1017 		break;
1018 	default:
1019 		return ret_val;
1020 	}
1021 
1022 	ret_val = hw->phy.ops.acquire(hw);
1023 	if (ret_val)
1024 		return ret_val;
1025 
1026 	data = E1000_READ_REG(hw, E1000_FEXTNVM);
1027 	if (!(data & sw_cfg_mask))
1028 		goto out;
1029 
1030 	/*
1031 	 * Make sure HW does not configure LCD from PHY
1032 	 * extended configuration before SW configuration
1033 	 */
1034 	data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1035 	if (!(hw->mac.type == e1000_pch2lan)) {
1036 		if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
1037 			goto out;
1038 	}
1039 
1040 	cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
1041 	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
1042 	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
1043 	if (!cnf_size)
1044 		goto out;
1045 
1046 	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
1047 	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
1048 
1049 	if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
1050 	    (hw->mac.type == e1000_pchlan)) ||
1051 	     (hw->mac.type == e1000_pch2lan)) {
1052 		/*
1053 		 * HW configures the SMBus address and LEDs when the
1054 		 * OEM and LCD Write Enable bits are set in the NVM.
1055 		 * When both NVM bits are cleared, SW will configure
1056 		 * them instead.
1057 		 */
1058 		ret_val = e1000_write_smbus_addr(hw);
1059 		if (ret_val)
1060 			goto out;
1061 
1062 		data = E1000_READ_REG(hw, E1000_LEDCTL);
1063 		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
1064 							(u16)data);
1065 		if (ret_val)
1066 			goto out;
1067 	}
1068 
1069 	/* Configure LCD from extended configuration region. */
1070 
1071 	/* cnf_base_addr is in DWORD */
1072 	word_addr = (u16)(cnf_base_addr << 1);
1073 
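	/* Each extended configuration entry is a (data, address) word pair */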
1074 	for (i = 0; i < cnf_size; i++) {
1075 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
1076 					   &reg_data);
1077 		if (ret_val)
1078 			goto out;
1079 
1080 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
1081 					   1, &reg_addr);
1082 		if (ret_val)
1083 			goto out;
1084 
1085 		/* Save off the PHY page for future writes. */
1086 		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
1087 			phy_page = reg_data;
1088 			continue;
1089 		}
1090 
1091 		reg_addr &= PHY_REG_MASK;
1092 		reg_addr |= phy_page;
1093 
1094 		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
1095 						    reg_data);
1096 		if (ret_val)
1097 			goto out;
1098 	}
1099 
1100 out:
1101 	hw->phy.ops.release(hw);
1102 	return ret_val;
1103 }
1104 
1105 /**
1106  *  e1000_k1_gig_workaround_hv - K1 Si workaround
1107  *  @hw:   pointer to the HW structure
1108  *  @link: link up bool flag
1109  *
1110  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
1111  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
1112  *  If link is down, the function will restore the default K1 setting located
1113  *  in the NVM.
1114  **/
1115 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1116 {
1117 	s32 ret_val = E1000_SUCCESS;
1118 	u16 status_reg = 0;
1119 	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
1120 
1121 	DEBUGFUNC("e1000_k1_gig_workaround_hv");
1122 
1123 	if (hw->mac.type != e1000_pchlan)
1124 		goto out;
1125 
1126 	/* Wrap the whole flow with the sw flag */
1127 	ret_val = hw->phy.ops.acquire(hw);
1128 	if (ret_val)
1129 		goto out;
1130 
1131 	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
1132 	if (link) {
1133 		if (hw->phy.type == e1000_phy_82578) {
1134 			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
1135 			                                      &status_reg);
1136 			if (ret_val)
1137 				goto release;
1138 
1139 			status_reg &= BM_CS_STATUS_LINK_UP |
1140 			              BM_CS_STATUS_RESOLVED |
1141 			              BM_CS_STATUS_SPEED_MASK;
1142 
1143 			if (status_reg == (BM_CS_STATUS_LINK_UP |
1144 			                   BM_CS_STATUS_RESOLVED |
1145 			                   BM_CS_STATUS_SPEED_1000))
1146 				k1_enable = FALSE;
1147 		}
1148 
1149 		if (hw->phy.type == e1000_phy_82577) {
1150 			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
1151 			                                      &status_reg);
1152 			if (ret_val)
1153 				goto release;
1154 
1155 			status_reg &= HV_M_STATUS_LINK_UP |
1156 			              HV_M_STATUS_AUTONEG_COMPLETE |
1157 			              HV_M_STATUS_SPEED_MASK;
1158 
1159 			if (status_reg == (HV_M_STATUS_LINK_UP |
1160 			                   HV_M_STATUS_AUTONEG_COMPLETE |
1161 			                   HV_M_STATUS_SPEED_1000))
1162 				k1_enable = FALSE;
1163 		}
1164 
1165 		/* Link stall fix for link up */
1166 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1167 		                                       0x0100);
1168 		if (ret_val)
1169 			goto release;
1170 
1171 	} else {
1172 		/* Link stall fix for link down */
1173 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1174 		                                       0x4100);
1175 		if (ret_val)
1176 			goto release;
1177 	}
1178 
1179 	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
1180 
1181 release:
1182 	hw->phy.ops.release(hw);
1183 out:
1184 	return ret_val;
1185 }
1186 
1187 /**
1188  *  e1000_configure_k1_ich8lan - Configure K1 power state
1189  *  @hw: pointer to the HW structure
1190  *  @k1_enable: K1 state to configure
1191  *
1192  *  Configure the K1 power state based on the provided parameter.
1193  *  Assumes semaphore already acquired.
1194  *
1195  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1196  **/
1197 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1198 {
1199 	s32 ret_val = E1000_SUCCESS;
1200 	u32 ctrl_reg = 0;
1201 	u32 ctrl_ext = 0;
1202 	u32 reg = 0;
1203 	u16 kmrn_reg = 0;
1204 
1205 	DEBUGFUNC("e1000_configure_k1_ich8lan");
1206 
1207 	ret_val = e1000_read_kmrn_reg_locked(hw,
1208 	                                     E1000_KMRNCTRLSTA_K1_CONFIG,
1209 	                                     &kmrn_reg);
1210 	if (ret_val)
1211 		goto out;
1212 
1213 	if (k1_enable)
1214 		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
1215 	else
1216 		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
1217 
1218 	ret_val = e1000_write_kmrn_reg_locked(hw,
1219 	                                      E1000_KMRNCTRLSTA_K1_CONFIG,
1220 	                                      kmrn_reg);
1221 	if (ret_val)
1222 		goto out;
1223 
1224 	usec_delay(20);
1225 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1226 	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1227 
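	/*
	 * Briefly force the MAC speed configuration (FRCSPD with the speed
	 * bits cleared and SPD_BYPS set), then restore the original CTRL and
	 * CTRL_EXT values.
	 */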
1228 	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1229 	reg |= E1000_CTRL_FRCSPD;
1230 	E1000_WRITE_REG(hw, E1000_CTRL, reg);
1231 
1232 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
1233 	usec_delay(20);
1234 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1235 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1236 	usec_delay(20);
1237 
1238 out:
1239 	return ret_val;
1240 }
1241 
1242 /**
1243  *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
1244  *  @hw:       pointer to the HW structure
1245  *  @d0_state: boolean if entering d0 or d3 device state
1246  *
1247  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
1248  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
1249  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
1250  **/
1251 s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1252 {
1253 	s32 ret_val = 0;
1254 	u32 mac_reg;
1255 	u16 oem_reg;
1256 
1257 	DEBUGFUNC("e1000_oem_bits_config_ich8lan");
1258 
1259 	if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan))
1260 		return ret_val;
1261 
1262 	ret_val = hw->phy.ops.acquire(hw);
1263 	if (ret_val)
1264 		return ret_val;
1265 
1266 	if (!(hw->mac.type == e1000_pch2lan)) {
1267 		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1268 		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1269 			goto out;
1270 	}
1271 
1272 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
1273 	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
1274 		goto out;
1275 
1276 	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
1277 
1278 	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
1279 	if (ret_val)
1280 		goto out;
1281 
1282 	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
1283 
1284 	if (d0_state) {
1285 		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
1286 			oem_reg |= HV_OEM_BITS_GBE_DIS;
1287 
1288 		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1289 			oem_reg |= HV_OEM_BITS_LPLU;
1290 	} else {
1291 		if (mac_reg & E1000_PHY_CTRL_NOND0A_GBE_DISABLE)
1292 			oem_reg |= HV_OEM_BITS_GBE_DIS;
1293 
1294 		if (mac_reg & E1000_PHY_CTRL_NOND0A_LPLU)
1295 			oem_reg |= HV_OEM_BITS_LPLU;
1296 	}
1297 	/* Restart auto-neg to activate the bits */
1298 	if (!hw->phy.ops.check_reset_block(hw))
1299 		oem_reg |= HV_OEM_BITS_RESTART_AN;
1300 	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
1301 
1302 out:
1303 	hw->phy.ops.release(hw);
1304 
1305 	return ret_val;
1306 }
1307 
1308 
1309 /**
1310  *  e1000_hv_phy_powerdown_workaround_ich8lan - Power down workaround on Sx
1311  *  @hw: pointer to the HW structure
1312  **/
1313 s32 e1000_hv_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
1314 {
1315 	DEBUGFUNC("e1000_hv_phy_powerdown_workaround_ich8lan");
1316 
1317 	if ((hw->phy.type != e1000_phy_82577) || (hw->revision_id > 2))
1318 		return E1000_SUCCESS;
1319 
1320 	return hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0444);
1321 }
1322 
1323 /**
1324  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
1325  *  @hw:   pointer to the HW structure
1326  **/
1327 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
1328 {
1329 	s32 ret_val;
1330 	u16 data;
1331 
1332 	DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
1333 
1334 	ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
1335 	if (ret_val)
1336 		return ret_val;
1337 
1338 	data |= HV_KMRN_MDIO_SLOW;
1339 
1340 	ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
1341 
1342 	return ret_val;
1343 }
1344 
1345 /**
1346  *  e1000_hv_phy_workarounds_ich8lan - Workarounds done after every PHY reset
1347  *  @hw: pointer to the HW structure
1348  **/
1349 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1350 {
1351 	s32 ret_val = E1000_SUCCESS;
1352 	u16 phy_data;
1353 
1354 	DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
1355 
1356 	if (hw->mac.type != e1000_pchlan)
1357 		goto out;
1358 
1359 	/* Set MDIO slow mode before any other MDIO access */
1360 	if (hw->phy.type == e1000_phy_82577) {
1361 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
1362 		if (ret_val)
1363 			goto out;
1364 	}
1365 
1366 	/* Hanksville M Phy init for IEEE. */
1367 	if ((hw->revision_id == 2) &&
1368 	    (hw->phy.type == e1000_phy_82577) &&
1369 	    ((hw->phy.revision == 2) || (hw->phy.revision == 3))) {
1370 		hw->phy.ops.write_reg(hw, 0x10, 0x8823);
1371 		hw->phy.ops.write_reg(hw, 0x11, 0x0018);
1372 		hw->phy.ops.write_reg(hw, 0x10, 0x8824);
1373 		hw->phy.ops.write_reg(hw, 0x11, 0x0016);
1374 		hw->phy.ops.write_reg(hw, 0x10, 0x8825);
1375 		hw->phy.ops.write_reg(hw, 0x11, 0x001A);
1376 		hw->phy.ops.write_reg(hw, 0x10, 0x888C);
1377 		hw->phy.ops.write_reg(hw, 0x11, 0x0007);
1378 		hw->phy.ops.write_reg(hw, 0x10, 0x888D);
1379 		hw->phy.ops.write_reg(hw, 0x11, 0x0007);
1380 		hw->phy.ops.write_reg(hw, 0x10, 0x888E);
1381 		hw->phy.ops.write_reg(hw, 0x11, 0x0007);
1382 		hw->phy.ops.write_reg(hw, 0x10, 0x8827);
1383 		hw->phy.ops.write_reg(hw, 0x11, 0x0001);
1384 		hw->phy.ops.write_reg(hw, 0x10, 0x8835);
1385 		hw->phy.ops.write_reg(hw, 0x11, 0x0001);
1386 		hw->phy.ops.write_reg(hw, 0x10, 0x8834);
1387 		hw->phy.ops.write_reg(hw, 0x11, 0x0001);
1388 		hw->phy.ops.write_reg(hw, 0x10, 0x8833);
1389 		hw->phy.ops.write_reg(hw, 0x11, 0x0002);
1390 	}
1391 
1392 	if (((hw->phy.type == e1000_phy_82577) &&
1393 	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
1394 	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
1395 		/* Disable generation of early preamble */
1396 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
1397 		if (ret_val)
1398 			goto out;
1399 
1400 		/* Preamble tuning for SSC */
1401 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(770, 16), 0xA204);
1402 		if (ret_val)
1403 			goto out;
1404 	}
1405 
1406 	if (hw->phy.type == e1000_phy_82578) {
1407 		if (hw->revision_id < 3) {
1408 			/* PHY config */
1409 			ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x29,
1410 			                                0x66C0);
1411 			if (ret_val)
1412 				goto out;
1413 
1414 			/* PHY config */
1415 			ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x1E,
1416 			                                0xFFFF);
1417 			if (ret_val)
1418 				goto out;
1419 		}
1420 
1421 		/*
1422 		 * Return registers to default by doing a soft reset then
1423 		 * writing 0x3140 to the control register.
1424 		 */
1425 		if (hw->phy.revision < 2) {
1426 			e1000_phy_sw_reset_generic(hw);
1427 			ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
1428 			                                0x3140);
1429 		}
1430 	}
1431 
1432 	if ((hw->revision_id == 2) &&
1433 	    (hw->phy.type == e1000_phy_82577) &&
1434 	    ((hw->phy.revision == 2) || (hw->phy.revision == 3))) {
1435 		/*
1436 		 * Workaround for OEM (GbE) not operating after reset -
1437 		 * restart AN (twice)
1438 		 */
1439 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0400);
1440 		if (ret_val)
1441 			goto out;
1442 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0400);
1443 		if (ret_val)
1444 			goto out;
1445 	}
1446 
1447 	/* Select page 0 */
1448 	ret_val = hw->phy.ops.acquire(hw);
1449 	if (ret_val)
1450 		goto out;
1451 
1452 	hw->phy.addr = 1;
1453 	ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
1454 	hw->phy.ops.release(hw);
1455 	if (ret_val)
1456 		goto out;
1457 
1458 	/*
1459 	 * Configure the K1 Si workaround during phy reset assuming there is
1460 	 * link so that it disables K1 if link is in 1Gbps.
1461 	 */
1462 	ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
1463 	if (ret_val)
1464 		goto out;
1465 
1466 	/* Workaround for link disconnects on a busy hub in half duplex */
1467 	ret_val = hw->phy.ops.acquire(hw);
1468 	if (ret_val)
1469 		goto out;
1470 	ret_val = hw->phy.ops.read_reg_locked(hw,
1471 	                                      PHY_REG(BM_PORT_CTRL_PAGE, 17),
1472 	                                      &phy_data);
1473 	if (ret_val)
1474 		goto release;
1475 	ret_val = hw->phy.ops.write_reg_locked(hw,
1476 	                                       PHY_REG(BM_PORT_CTRL_PAGE, 17),
1477 	                                       phy_data & 0x00FF);
1478 release:
1479 	hw->phy.ops.release(hw);
1480 out:
1481 	return ret_val;
1482 }
1483 
1484 /**
1485  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
1486  *  @hw:   pointer to the HW structure
1487  **/
1488 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
1489 {
1490 	u32 mac_reg;
1491 	u16 i;
1492 
1493 	DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
1494 
1495 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
1496 	for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1497 		mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
1498 		hw->phy.ops.write_reg(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF));
1499 		hw->phy.ops.write_reg(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF));
1500 		mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
1501 		hw->phy.ops.write_reg(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF));
1502 		hw->phy.ops.write_reg(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0x8000));
1503 	}
1504 }
1505 
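/**
 *  e1000_calc_rx_da_crc - Calculate the 802.3 CRC of a destination address
 *  @mac: 6-byte MAC (receive) address
 *
 *  Computes the bit-reflected CRC-32 (polynomial 0xEDB88320) of the given
 *  address; the jumbo frame workaround below writes this value to the
 *  PCH_RAICC registers for each valid receive address.
 **/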
1506 static u32 e1000_calc_rx_da_crc(u8 mac[])
1507 {
1508 	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
1509 	u32 i, j, mask, crc;
1510 
1511 	DEBUGFUNC("e1000_calc_rx_da_crc");
1512 
1513 	crc = 0xffffffff;
1514 	for (i = 0; i < 6; i++) {
1515 		crc = crc ^ mac[i];
1516 		for (j = 8; j > 0; j--) {
1517 			mask = (crc & 1) * (-1);
1518 			crc = (crc >> 1) ^ (poly & mask);
1519 		}
1520 	}
1521 	return ~crc;
1522 }
1523 
1524 /**
1525  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
1526  *  with 82579 PHY
1527  *  @hw: pointer to the HW structure
1528  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
1529  **/
1530 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1531 {
1532 	s32 ret_val = E1000_SUCCESS;
1533 	u16 phy_reg, data;
1534 	u32 mac_reg;
1535 	u16 i;
1536 
1537 	DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
1538 
1539 	if (hw->mac.type != e1000_pch2lan)
1540 		goto out;
1541 
1542 	/* disable Rx path while enabling/disabling workaround */
1543 	hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
1544 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
1545 	if (ret_val)
1546 		goto out;
1547 
1548 	if (enable) {
1549 		/*
1550 		 * Write Rx addresses (rar_entry_count for RAL/H, +4 for
1551 		 * SHRAL/H) and initial CRC values to the MAC
1552 		 */
1553 		for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1554 			u8 mac_addr[ETH_ADDR_LEN] = {0};
1555 			u32 addr_high, addr_low;
1556 
1557 			addr_high = E1000_READ_REG(hw, E1000_RAH(i));
1558 			if (!(addr_high & E1000_RAH_AV))
1559 				continue;
1560 			addr_low = E1000_READ_REG(hw, E1000_RAL(i));
1561 			mac_addr[0] = (addr_low & 0xFF);
1562 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
1563 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
1564 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
1565 			mac_addr[4] = (addr_high & 0xFF);
1566 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
1567 
1568 			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
1569 					e1000_calc_rx_da_crc(mac_addr));
1570 		}
1571 
1572 		/* Write Rx addresses to the PHY */
1573 		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
1574 
1575 		/* Enable jumbo frame workaround in the MAC */
1576 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1577 		mac_reg &= ~(1 << 14);
1578 		mac_reg |= (7 << 15);
1579 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
1580 
1581 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1582 		mac_reg |= E1000_RCTL_SECRC;
1583 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1584 
1585 		ret_val = e1000_read_kmrn_reg_generic(hw,
1586 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1587 						&data);
1588 		if (ret_val)
1589 			goto out;
1590 		ret_val = e1000_write_kmrn_reg_generic(hw,
1591 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1592 						data | (1 << 0));
1593 		if (ret_val)
1594 			goto out;
1595 		ret_val = e1000_read_kmrn_reg_generic(hw,
1596 						E1000_KMRNCTRLSTA_HD_CTRL,
1597 						&data);
1598 		if (ret_val)
1599 			goto out;
1600 		data &= ~(0xF << 8);
1601 		data |= (0xB << 8);
1602 		ret_val = e1000_write_kmrn_reg_generic(hw,
1603 						E1000_KMRNCTRLSTA_HD_CTRL,
1604 						data);
1605 		if (ret_val)
1606 			goto out;
1607 
1608 		/* Enable jumbo frame workaround in the PHY */
1609 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1610 		data &= ~(0x7F << 5);
1611 		data |= (0x37 << 5);
1612 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1613 		if (ret_val)
1614 			goto out;
1615 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1616 		data &= ~(1 << 13);
1617 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1618 		if (ret_val)
1619 			goto out;
1620 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1621 		data &= ~(0x3FF << 2);
1622 		data |= (0x1A << 2);
1623 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1624 		if (ret_val)
1625 			goto out;
1626 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xFE00);
1627 		if (ret_val)
1628 			goto out;
1629 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1630 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data | (1 << 10));
1631 		if (ret_val)
1632 			goto out;
1633 	} else {
1634 		/* Write MAC register values back to h/w defaults */
1635 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1636 		mac_reg &= ~(0xF << 14);
1637 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
1638 
1639 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1640 		mac_reg &= ~E1000_RCTL_SECRC;
1641 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1642 
1643 		ret_val = e1000_read_kmrn_reg_generic(hw,
1644 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1645 						&data);
1646 		if (ret_val)
1647 			goto out;
1648 		ret_val = e1000_write_kmrn_reg_generic(hw,
1649 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1650 						data & ~(1 << 0));
1651 		if (ret_val)
1652 			goto out;
1653 		ret_val = e1000_read_kmrn_reg_generic(hw,
1654 						E1000_KMRNCTRLSTA_HD_CTRL,
1655 						&data);
1656 		if (ret_val)
1657 			goto out;
1658 		data &= ~(0xF << 8);
1659 		data |= (0xB << 8);
1660 		ret_val = e1000_write_kmrn_reg_generic(hw,
1661 						E1000_KMRNCTRLSTA_HD_CTRL,
1662 						data);
1663 		if (ret_val)
1664 			goto out;
1665 
1666 		/* Write PHY register values back to h/w defaults */
1667 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1668 		data &= ~(0x7F << 5);
1669 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1670 		if (ret_val)
1671 			goto out;
1672 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1673 		data |= (1 << 13);
1674 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1675 		if (ret_val)
1676 			goto out;
1677 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1678 		data &= ~(0x3FF << 2);
1679 		data |= (0x8 << 2);
1680 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1681 		if (ret_val)
1682 			goto out;
1683 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
1684 		if (ret_val)
1685 			goto out;
1686 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1687 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data & ~(1 << 10));
1688 		if (ret_val)
1689 			goto out;
1690 	}
1691 
1692 	/* re-enable Rx path after enabling/disabling workaround */
1693 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
1694 
1695 out:
1696 	return ret_val;
1697 }
1698 
1699 /**
1700  *  e1000_lv_phy_workarounds_ich8lan - PHY workarounds after a PHY reset
 *  @hw: pointer to the HW structure
 *
1701  *  A series of PHY workarounds to be done after every PHY reset.
1702  **/
1703 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1704 {
1705 	s32 ret_val = E1000_SUCCESS;
1706 
1707 	DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
1708 
1709 	if (hw->mac.type != e1000_pch2lan)
1710 		goto out;
1711 
1712 	/* Set MDIO slow mode before any other MDIO access */
1713 	ret_val = e1000_set_mdio_slow_mode_hv(hw);
1714 
1715 out:
1716 	return ret_val;
1717 }
1718 
1719 /**
1720  *  e1000_k1_workaround_lv - K1 Si workaround
1721  *  @hw:   pointer to the HW structure
1722  *
1723  *  Workaround to set the K1 beacon duration for 82579 parts
1724  **/
1725 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
1726 {
1727 	s32 ret_val = E1000_SUCCESS;
1728 	u16 status_reg = 0;
1729 	u32 mac_reg;
1730 
1731 	DEBUGFUNC("e1000_k1_workaround_lv");
1732 
1733 	if (hw->mac.type != e1000_pch2lan)
1734 		goto out;
1735 
1736 	/* Set K1 beacon duration based on 1Gbps speed or otherwise */
1737 	ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
1738 	if (ret_val)
1739 		goto out;
1740 
1741 	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
1742 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
1743 		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1744 		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1745 
1746 		if (status_reg & HV_M_STATUS_SPEED_1000)
1747 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1748 		else
1749 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
1750 
1751 		E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1752 	}
1753 
1754 out:
1755 	return ret_val;
1756 }
1757 
1758 /**
1759  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
1760  *  @hw:   pointer to the HW structure
1761  *  @gate: boolean set to TRUE to gate, FALSE to un-gate
1762  *
1763  *  Gate/ungate the automatic PHY configuration via hardware; perform
1764  *  the configuration via software instead.
1765  **/
1766 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
1767 {
1768 	u32 extcnf_ctrl;
1769 
1770 	DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
1771 
1772 	if (hw->mac.type != e1000_pch2lan)
1773 		return;
1774 
1775 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1776 
1777 	if (gate)
1778 		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1779 	else
1780 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1781 
1782 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1783 	return;
1784 }
1785 
1786 /**
1787  *  e1000_hv_phy_tuning_workaround_ich8lan - PHY tuning workaround
 *  @hw: pointer to the HW structure
 *
1788  *  A PHY tuning workaround needed for Nahum3 + Hanksville testing,
 *  requested by the HW team.
1789  **/
1790 static s32 e1000_hv_phy_tuning_workaround_ich8lan(struct e1000_hw *hw)
1791 {
1792 	s32 ret_val = E1000_SUCCESS;
1793 
1794 	DEBUGFUNC("e1000_hv_phy_tuning_workaround_ich8lan");
1795 
1796 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
1797 	if (ret_val)
1798 		goto out;
1799 
1800 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(770, 16), 0xA204);
1801 	if (ret_val)
1802 		goto out;
1803 
1804 	ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x29, 0x66C0);
1805 	if (ret_val)
1806 		goto out;
1807 
1808 	ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x1E, 0xFFFF);
1809 
1810 out:
1811 	return ret_val;
1812 }
1813 
1814 /**
1815  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
1816  *  @hw: pointer to the HW structure
1817  *
1818  *  Check the appropriate indication the MAC has finished configuring the
1819  *  PHY after a software reset.
1820  **/
1821 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
1822 {
1823 	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
1824 
1825 	DEBUGFUNC("e1000_lan_init_done_ich8lan");
1826 
1827 	/* Wait for basic configuration to complete before proceeding */
1828 	do {
1829 		data = E1000_READ_REG(hw, E1000_STATUS);
1830 		data &= E1000_STATUS_LAN_INIT_DONE;
1831 		usec_delay(100);
1832 	} while ((!data) && --loop);
1833 
1834 	/*
1835 	 * If basic configuration does not complete before the above loop
1836 	 * count reaches 0, loading the configuration from NVM will
1837 	 * leave the PHY in a bad state, possibly resulting in no link.
1838 	 */
1839 	if (loop == 0)
1840 		DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
1841 
1842 	/* Clear the Init Done bit for the next init event */
1843 	data = E1000_READ_REG(hw, E1000_STATUS);
1844 	data &= ~E1000_STATUS_LAN_INIT_DONE;
1845 	E1000_WRITE_REG(hw, E1000_STATUS, data);
1846 }
1847 
1848 /**
1849  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
1850  *  @hw: pointer to the HW structure
1851  **/
1852 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1853 {
1854 	s32 ret_val = E1000_SUCCESS;
1855 	u16 reg;
1856 
1857 	DEBUGFUNC("e1000_post_phy_reset_ich8lan");
1858 
1859 	if (hw->phy.ops.check_reset_block(hw))
1860 		goto out;
1861 
1862 	/* Allow time for h/w to get to quiescent state after reset */
1863 	msec_delay(10);
1864 
1865 	/* Perform any necessary post-reset workarounds */
1866 	switch (hw->mac.type) {
1867 	case e1000_pchlan:
1868 		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
1869 		if (ret_val)
1870 			goto out;
1871 		break;
1872 	case e1000_pch2lan:
1873 		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
1874 		if (ret_val)
1875 			goto out;
1876 		break;
1877 	default:
1878 		break;
1879 	}
1880 
1881 	if (hw->device_id == E1000_DEV_ID_ICH10_HANKSVILLE) {
1882 		ret_val = e1000_hv_phy_tuning_workaround_ich8lan(hw);
1883 		if (ret_val)
1884 			goto out;
1885 	}
1886 
1887 	/* Dummy read to clear the phy wakeup bit after lcd reset */
1888 	if (hw->mac.type >= e1000_pchlan)
1889 		hw->phy.ops.read_reg(hw, BM_WUC, &reg);
1890 
1891 	/* Configure the LCD with the extended configuration region in NVM */
1892 	ret_val = e1000_sw_lcd_config_ich8lan(hw);
1893 	if (ret_val)
1894 		goto out;
1895 
1896 	/* Configure the LCD with the OEM bits in NVM */
1897 	ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
1898 
1899 	/* Ungate automatic PHY configuration on non-managed 82579 */
1900 	if ((hw->mac.type == e1000_pch2lan) &&
1901 	    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID)) {
1902 		msec_delay(10);
1903 		e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
1904 	}
1905 
1906 out:
1907 	return ret_val;
1908 }
1909 
1910 /**
1911  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
1912  *  @hw: pointer to the HW structure
1913  *
1914  *  Resets the PHY
1915  *  This is a function pointer entry point called by drivers
1916  *  or other shared routines.
1917  **/
1918 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
1919 {
1920 	s32 ret_val = E1000_SUCCESS;
1921 
1922 	DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
1923 
1924 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
1925 	if ((hw->mac.type == e1000_pch2lan) &&
1926 	    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
1927 		e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
1928 
1929 	ret_val = e1000_phy_hw_reset_generic(hw);
1930 	if (ret_val)
1931 		goto out;
1932 
1933 	ret_val = e1000_post_phy_reset_ich8lan(hw);
1934 
1935 out:
1936 	return ret_val;
1937 }
1938 
1939 /**
1940  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
1941  *  @hw: pointer to the HW structure
1942  *  @active: TRUE to enable LPLU, FALSE to disable
1943  *
1944  *  Sets the LPLU state according to the active flag.  For PCH, if the OEM
1945  *  write bits are disabled in the NVM, writing the LPLU bits in the MAC will
1946  *  not set the PHY speed.  This function manually sets the LPLU bit and
1947  *  restarts auto-negotiation, as the hardware would.  D3 and D0 LPLU call
1948  *  the same function since they configure the same bit.
1949  **/
1950 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
1951 {
1952 	s32 ret_val = E1000_SUCCESS;
1953 	u16 oem_reg;
1954 
1955 	DEBUGFUNC("e1000_set_lplu_state_pchlan");
1956 
1957 	ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
1958 	if (ret_val)
1959 		goto out;
1960 
1961 	if (active)
1962 		oem_reg |= HV_OEM_BITS_LPLU;
1963 	else
1964 		oem_reg &= ~HV_OEM_BITS_LPLU;
1965 
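	/* Restart auto-negotiation so the new LPLU setting takes effect. */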
1966 	oem_reg |= HV_OEM_BITS_RESTART_AN;
1967 	ret_val = hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
1968 
1969 out:
1970 	return ret_val;
1971 }
1972 
1973 /**
1974  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
1975  *  @hw: pointer to the HW structure
1976  *  @active: TRUE to enable LPLU, FALSE to disable
1977  *
1978  *  Sets the LPLU D0 state according to the active flag.  When
1979  *  activating LPLU this function also disables smart speed
1980  *  and vice versa.  LPLU will not be activated unless the
1981  *  device autonegotiation advertisement meets standards of
1982  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
1983  *  This is a function pointer entry point only called by
1984  *  PHY setup routines.
1985  **/
1986 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
1987 {
1988 	struct e1000_phy_info *phy = &hw->phy;
1989 	u32 phy_ctrl;
1990 	s32 ret_val = E1000_SUCCESS;
1991 	u16 data;
1992 
1993 	DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
1994 
1995 	if (phy->type == e1000_phy_ife)
1996 		goto out;
1997 
1998 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
1999 
2000 	if (active) {
2001 		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2002 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2003 
2004 		if (phy->type != e1000_phy_igp_3)
2005 			goto out;
2006 
2007 		/*
2008 		 * Call gig speed drop workaround on LPLU before accessing
2009 		 * any PHY registers
2010 		 */
2011 		if (hw->mac.type == e1000_ich8lan)
2012 			e1000_gig_downshift_workaround_ich8lan(hw);
2013 
2014 		/* When LPLU is enabled, we should disable SmartSpeed */
2015 		ret_val = phy->ops.read_reg(hw,
2016 		                            IGP01E1000_PHY_PORT_CONFIG,
2017 		                            &data);
 		if (ret_val)
 			goto out;

2018 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2019 		ret_val = phy->ops.write_reg(hw,
2020 		                             IGP01E1000_PHY_PORT_CONFIG,
2021 		                             data);
2022 		if (ret_val)
2023 			goto out;
2024 	} else {
2025 		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2026 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2027 
2028 		if (phy->type != e1000_phy_igp_3)
2029 			goto out;
2030 
2031 		/*
2032 		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
2033 		 * during Dx states where the power conservation is most
2034 		 * important.  During driver activity we should enable
2035 		 * SmartSpeed, so performance is maintained.
2036 		 */
2037 		if (phy->smart_speed == e1000_smart_speed_on) {
2038 			ret_val = phy->ops.read_reg(hw,
2039 			                            IGP01E1000_PHY_PORT_CONFIG,
2040 			                            &data);
2041 			if (ret_val)
2042 				goto out;
2043 
2044 			data |= IGP01E1000_PSCFR_SMART_SPEED;
2045 			ret_val = phy->ops.write_reg(hw,
2046 			                             IGP01E1000_PHY_PORT_CONFIG,
2047 			                             data);
2048 			if (ret_val)
2049 				goto out;
2050 		} else if (phy->smart_speed == e1000_smart_speed_off) {
2051 			ret_val = phy->ops.read_reg(hw,
2052 			                            IGP01E1000_PHY_PORT_CONFIG,
2053 			                            &data);
2054 			if (ret_val)
2055 				goto out;
2056 
2057 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2058 			ret_val = phy->ops.write_reg(hw,
2059 			                             IGP01E1000_PHY_PORT_CONFIG,
2060 			                             data);
2061 			if (ret_val)
2062 				goto out;
2063 		}
2064 	}
2065 
2066 out:
2067 	return ret_val;
2068 }
2069 
2070 /**
2071  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
2072  *  @hw: pointer to the HW structure
2073  *  @active: TRUE to enable LPLU, FALSE to disable
2074  *
2075  *  Sets the LPLU D3 state according to the active flag.  When
2076  *  activating LPLU this function also disables smart speed
2077  *  and vice versa.  LPLU will not be activated unless the
2078  *  device autonegotiation advertisement meets standards of
2079  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
2080  *  This is a function pointer entry point only called by
2081  *  PHY setup routines.
2082  **/
2083 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2084 {
2085 	struct e1000_phy_info *phy = &hw->phy;
2086 	u32 phy_ctrl;
2087 	s32 ret_val = E1000_SUCCESS;
2088 	u16 data;
2089 
2090 	DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
2091 
2092 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2093 
2094 	if (!active) {
2095 		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
2096 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2097 
2098 		if (phy->type != e1000_phy_igp_3)
2099 			goto out;
2100 
2101 		/*
2102 		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
2103 		 * during Dx states where the power conservation is most
2104 		 * important.  During driver activity we should enable
2105 		 * SmartSpeed, so performance is maintained.
2106 		 */
2107 		if (phy->smart_speed == e1000_smart_speed_on) {
2108 			ret_val = phy->ops.read_reg(hw,
2109 			                            IGP01E1000_PHY_PORT_CONFIG,
2110 			                            &data);
2111 			if (ret_val)
2112 				goto out;
2113 
2114 			data |= IGP01E1000_PSCFR_SMART_SPEED;
2115 			ret_val = phy->ops.write_reg(hw,
2116 			                             IGP01E1000_PHY_PORT_CONFIG,
2117 			                             data);
2118 			if (ret_val)
2119 				goto out;
2120 		} else if (phy->smart_speed == e1000_smart_speed_off) {
2121 			ret_val = phy->ops.read_reg(hw,
2122 			                            IGP01E1000_PHY_PORT_CONFIG,
2123 			                            &data);
2124 			if (ret_val)
2125 				goto out;
2126 
2127 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2128 			ret_val = phy->ops.write_reg(hw,
2129 			                             IGP01E1000_PHY_PORT_CONFIG,
2130 			                             data);
2131 			if (ret_val)
2132 				goto out;
2133 		}
2134 	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
2135 	           (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
2136 	           (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
2137 		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
2138 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2139 
2140 		if (phy->type != e1000_phy_igp_3)
2141 			goto out;
2142 
2143 		/*
2144 		 * Call gig speed drop workaround on LPLU before accessing
2145 		 * any PHY registers
2146 		 */
2147 		if (hw->mac.type == e1000_ich8lan)
2148 			e1000_gig_downshift_workaround_ich8lan(hw);
2149 
2150 		/* When LPLU is enabled, we should disable SmartSpeed */
2151 		ret_val = phy->ops.read_reg(hw,
2152 		                            IGP01E1000_PHY_PORT_CONFIG,
2153 		                            &data);
2154 		if (ret_val)
2155 			goto out;
2156 
2157 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2158 		ret_val = phy->ops.write_reg(hw,
2159 		                             IGP01E1000_PHY_PORT_CONFIG,
2160 		                             data);
2161 	}
2162 
2163 out:
2164 	return ret_val;
2165 }
2166 
2167 /**
2168  *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
2169  *  @hw: pointer to the HW structure
2170  *  @bank:  pointer to the variable that returns the active bank
2171  *
2172  *  Reads signature byte from the NVM using the flash access registers.
2173  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
2174  **/
2175 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
2176 {
2177 	u32 eecd;
2178 	struct e1000_nvm_info *nvm = &hw->nvm;
2179 	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
2180 	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
2181 	u8 sig_byte = 0;
2182 	s32 ret_val = E1000_SUCCESS;
2183 
2184 	DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
2185 
2186 	switch (hw->mac.type) {
2187 	case e1000_ich8lan:
2188 	case e1000_ich9lan:
2189 		eecd = E1000_READ_REG(hw, E1000_EECD);
2190 		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
2191 		    E1000_EECD_SEC1VAL_VALID_MASK) {
2192 			if (eecd & E1000_EECD_SEC1VAL)
2193 				*bank = 1;
2194 			else
2195 				*bank = 0;
2196 
2197 			goto out;
2198 		}
2199 		DEBUGOUT("Unable to determine valid NVM bank via EEC - "
2200 		         "reading flash signature\n");
2201 		/* fall-thru */
2202 	default:
2203 		/* set bank to 0 in case flash read fails */
2204 		*bank = 0;
2205 
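		/*
		 * The signature lives in the high byte of word 0x13, hence
		 * the byte offset E1000_ICH_NVM_SIG_WORD * 2 + 1.
		 */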
2206 		/* Check bank 0 */
2207 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
2208 		                                        &sig_byte);
2209 		if (ret_val)
2210 			goto out;
2211 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2212 		    E1000_ICH_NVM_SIG_VALUE) {
2213 			*bank = 0;
2214 			goto out;
2215 		}
2216 
2217 		/* Check bank 1 */
2218 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
2219 		                                        bank1_offset,
2220 		                                        &sig_byte);
2221 		if (ret_val)
2222 			goto out;
2223 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2224 		    E1000_ICH_NVM_SIG_VALUE) {
2225 			*bank = 1;
2226 			goto out;
2227 		}
2228 
2229 		DEBUGOUT("ERROR: No valid NVM bank present\n");
2230 		ret_val = -E1000_ERR_NVM;
2231 		break;
2232 	}
2233 out:
2234 	return ret_val;
2235 }
2236 
2237 /**
2238  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
2239  *  @hw: pointer to the HW structure
2240  *  @offset: The offset (in bytes) of the word(s) to read.
2241  *  @words: Size of data to read in words
2242  *  @data: Pointer to the word(s) to read at offset.
2243  *
2244  *  Reads a word(s) from the NVM using the flash access registers.
2245  **/
2246 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2247                                   u16 *data)
2248 {
2249 	struct e1000_nvm_info *nvm = &hw->nvm;
2250 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2251 	u32 act_offset;
2252 	s32 ret_val = E1000_SUCCESS;
2253 	u32 bank = 0;
2254 	u16 i, word;
2255 
2256 	DEBUGFUNC("e1000_read_nvm_ich8lan");
2257 
2258 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2259 	    (words == 0)) {
2260 		DEBUGOUT("nvm parameter(s) out of bounds\n");
2261 		ret_val = -E1000_ERR_NVM;
2262 		goto out;
2263 	}
2264 
2265 	nvm->ops.acquire(hw);
2266 
2267 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2268 	if (ret_val != E1000_SUCCESS) {
2269 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
2270 		bank = 0;
2271 	}
2272 
2273 	act_offset = (bank) ? nvm->flash_bank_size : 0;
2274 	act_offset += offset;
2275 
2276 	ret_val = E1000_SUCCESS;
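	/* Prefer a modified shadow RAM value over the word in flash. */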
2277 	for (i = 0; i < words; i++) {
2278 		if ((dev_spec->shadow_ram) &&
2279 		    (dev_spec->shadow_ram[offset+i].modified)) {
2280 			data[i] = dev_spec->shadow_ram[offset+i].value;
2281 		} else {
2282 			ret_val = e1000_read_flash_word_ich8lan(hw,
2283 			                                        act_offset + i,
2284 			                                        &word);
2285 			if (ret_val)
2286 				break;
2287 			data[i] = word;
2288 		}
2289 	}
2290 
2291 	nvm->ops.release(hw);
2292 
2293 out:
2294 	if (ret_val)
2295 		DEBUGOUT1("NVM read error: %d\n", ret_val);
2296 
2297 	return ret_val;
2298 }
2299 
2300 /**
2301  *  e1000_flash_cycle_init_ich8lan - Initialize flash
2302  *  @hw: pointer to the HW structure
2303  *
2304  *  This function does initial flash setup so that a new read/write/erase cycle
2305  *  can be started.
2306  **/
2307 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2308 {
2309 	union ich8_hws_flash_status hsfsts;
2310 	s32 ret_val = -E1000_ERR_NVM;
2311 	s32 i = 0;
2312 
2313 	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
2314 
2315 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2316 
2317 	/* Check if the flash descriptor is valid */
2318 	if (hsfsts.hsf_status.fldesvalid == 0) {
2319 		DEBUGOUT("Flash descriptor invalid.  "
2320 		         "SW Sequencing must be used.");
2321 		goto out;
2322 	}
2323 
2324 	/* Clear FCERR and DAEL in hw status by writing 1 */
2325 	hsfsts.hsf_status.flcerr = 1;
2326 	hsfsts.hsf_status.dael = 1;
2327 
2328 	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2329 
2330 	/*
2331 	 * Either a hardware SPI cycle-in-progress bit must be available
2332 	 * to check against before starting a new cycle, or the FDONE bit
2333 	 * must be set by hardware after reset so that it can be used to
2334 	 * tell whether a cycle is in progress or has completed.
2337 	 */
2338 
2339 	if (hsfsts.hsf_status.flcinprog == 0) {
2340 		/*
2341 		 * There is no cycle running at present,
2342 		 * so we can start a cycle.
2343 		 * Begin by setting Flash Cycle Done.
2344 		 */
2345 		hsfsts.hsf_status.flcdone = 1;
2346 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2347 		ret_val = E1000_SUCCESS;
2348 	} else {
2349 		/*
2350 		 * Otherwise poll for some time so the current
2351 		 * cycle has a chance to end before giving up.
2352 		 */
2353 		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
2354 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2355 			                                      ICH_FLASH_HSFSTS);
2356 			if (hsfsts.hsf_status.flcinprog == 0) {
2357 				ret_val = E1000_SUCCESS;
2358 				break;
2359 			}
2360 			usec_delay(1);
2361 		}
2362 		if (ret_val == E1000_SUCCESS) {
2363 			/*
2364 			 * The previous cycle ended within the wait period;
2365 			 * now set the Flash Cycle Done.
2366 			 */
2367 			hsfsts.hsf_status.flcdone = 1;
2368 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
2369 			                        hsfsts.regval);
2370 		} else {
2371 			DEBUGOUT("Flash controller busy, cannot get access");
2372 		}
2373 	}
2374 
2375 out:
2376 	return ret_val;
2377 }
2378 
2379 /**
2380  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
2381  *  @hw: pointer to the HW structure
2382  *  @timeout: maximum time to wait for completion
2383  *
2384  *  This function starts a flash cycle and waits for its completion.
2385  **/
2386 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
2387 {
2388 	union ich8_hws_flash_ctrl hsflctl;
2389 	union ich8_hws_flash_status hsfsts;
2390 	s32 ret_val = -E1000_ERR_NVM;
2391 	u32 i = 0;
2392 
2393 	DEBUGFUNC("e1000_flash_cycle_ich8lan");
2394 
2395 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
2396 	hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2397 	hsflctl.hsf_ctrl.flcgo = 1;
2398 	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2399 
2400 	/* wait till FDONE bit is set to 1 */
2401 	do {
2402 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2403 		if (hsfsts.hsf_status.flcdone == 1)
2404 			break;
2405 		usec_delay(1);
2406 	} while (i++ < timeout);
2407 
2408 	if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
2409 		ret_val = E1000_SUCCESS;
2410 
2411 	return ret_val;
2412 }
2413 
2414 /**
2415  *  e1000_read_flash_word_ich8lan - Read word from flash
2416  *  @hw: pointer to the HW structure
2417  *  @offset: offset to data location
2418  *  @data: pointer to the location for storing the data
2419  *
2420  *  Reads the flash word at offset into data.  Offset is converted
2421  *  to bytes before read.
2422  **/
2423 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
2424                                          u16 *data)
2425 {
2426 	s32 ret_val;
2427 
2428 	DEBUGFUNC("e1000_read_flash_word_ich8lan");
2429 
2430 	if (!data) {
2431 		ret_val = -E1000_ERR_NVM;
2432 		goto out;
2433 	}
2434 
2435 	/* Must convert offset into bytes. */
2436 	offset <<= 1;
2437 
2438 	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 2, data);
2439 
2440 out:
2441 	return ret_val;
2442 }
2443 
2444 /**
2445  *  e1000_read_flash_byte_ich8lan - Read byte from flash
2446  *  @hw: pointer to the HW structure
2447  *  @offset: The offset of the byte to read.
2448  *  @data: Pointer to a byte to store the value read.
2449  *
2450  *  Reads a single byte from the NVM using the flash access registers.
2451  **/
2452 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2453                                          u8 *data)
2454 {
2455 	s32 ret_val = E1000_SUCCESS;
2456 	u16 word = 0;
2457 
2458 	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
2459 	if (ret_val)
2460 		goto out;
2461 
2462 	*data = (u8)word;
2463 
2464 out:
2465 	return ret_val;
2466 }
2467 
2468 /**
2469  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
2470  *  @hw: pointer to the HW structure
2471  *  @offset: The offset (in bytes) of the byte or word to read.
2472  *  @size: Size of data to read, 1=byte 2=word
2473  *  @data: Pointer to the word to store the value read.
2474  *
2475  *  Reads a byte or word from the NVM using the flash access registers.
2476  **/
2477 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2478                                          u8 size, u16 *data)
2479 {
2480 	union ich8_hws_flash_status hsfsts;
2481 	union ich8_hws_flash_ctrl hsflctl;
2482 	u32 flash_linear_addr;
2483 	u32 flash_data = 0;
2484 	s32 ret_val = -E1000_ERR_NVM;
2485 	u8 count = 0;
2486 
2487 	DEBUGFUNC("e1000_read_flash_data_ich8lan");
2488 
2489 	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
2490 		goto out;
2491 
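	/* Convert the byte offset within the NVM region to a linear address. */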
2492 	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2493 	                    hw->nvm.flash_base_addr;
2494 
2495 	do {
2496 		usec_delay(1);
2497 		/* Steps */
2498 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
2499 		if (ret_val != E1000_SUCCESS)
2500 			break;
2501 
2502 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2503 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2504 		hsflctl.hsf_ctrl.fldbcount = size - 1;
2505 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
2506 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2507 
2508 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
2509 
2510 		ret_val = e1000_flash_cycle_ich8lan(hw,
2511 		                                ICH_FLASH_READ_COMMAND_TIMEOUT);
2512 
2513 		/*
2514 		 * If FCERR is set, clear it and retry the whole sequence a
2515 		 * few more times; otherwise read the result from the Flash
2516 		 * Data0 register, least significant byte first.
2518 		 */
2519 		if (ret_val == E1000_SUCCESS) {
2520 			flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
2521 			if (size == 1)
2522 				*data = (u8)(flash_data & 0x000000FF);
2523 			else if (size == 2)
2524 				*data = (u16)(flash_data & 0x0000FFFF);
2525 			break;
2526 		} else {
2527 			/*
2528 			 * If we've gotten here, then things are probably
2529 			 * completely hosed, but if the error condition is
2530 			 * detected, it won't hurt to give it another try...
2531 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
2532 			 */
2533 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2534 			                                      ICH_FLASH_HSFSTS);
2535 			if (hsfsts.hsf_status.flcerr == 1) {
2536 				/* Repeat for some time before giving up. */
2537 				continue;
2538 			} else if (hsfsts.hsf_status.flcdone == 0) {
2539 				DEBUGOUT("Timeout error - flash cycle "
2540 				         "did not complete.");
2541 				break;
2542 			}
2543 		}
2544 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2545 
2546 out:
2547 	return ret_val;
2548 }
2549 
2550 /**
2551  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
2552  *  @hw: pointer to the HW structure
2553  *  @offset: The offset (in bytes) of the word(s) to write.
2554  *  @words: Size of data to write in words
2555  *  @data: Pointer to the word(s) to write at offset.
2556  *
2557  *  Stages the word(s) in the shadow RAM to be committed to the NVM later.
2558  **/
2559 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2560                                    u16 *data)
2561 {
2562 	struct e1000_nvm_info *nvm = &hw->nvm;
2563 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2564 	s32 ret_val = E1000_SUCCESS;
2565 	u16 i;
2566 
2567 	DEBUGFUNC("e1000_write_nvm_ich8lan");
2568 
2569 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2570 	    (words == 0)) {
2571 		DEBUGOUT("nvm parameter(s) out of bounds\n");
2572 		ret_val = -E1000_ERR_NVM;
2573 		goto out;
2574 	}
2575 
2576 	nvm->ops.acquire(hw);
2577 
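	/*
	 * Only the shadow RAM is updated here; the changes are committed
	 * to the flash by e1000_update_nvm_checksum_ich8lan().
	 */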
2578 	for (i = 0; i < words; i++) {
2579 		dev_spec->shadow_ram[offset+i].modified = TRUE;
2580 		dev_spec->shadow_ram[offset+i].value = data[i];
2581 	}
2582 
2583 	nvm->ops.release(hw);
2584 
2585 out:
2586 	return ret_val;
2587 }
2588 
2589 /**
2590  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
2591  *  @hw: pointer to the HW structure
2592  *
2593  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
2594  *  which writes the checksum to the shadow ram.  The changes in the shadow
2595  *  ram are then committed to the EEPROM by processing each bank at a time
2596  *  checking for the modified bit and writing only the pending changes.
2597  *  After a successful commit, the shadow ram is cleared and is ready for
2598  *  future writes.
2599  **/
2600 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2601 {
2602 	struct e1000_nvm_info *nvm = &hw->nvm;
2603 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2604 	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
2605 	s32 ret_val;
2606 	u16 data;
2607 
2608 	DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
2609 
2610 	ret_val = e1000_update_nvm_checksum_generic(hw);
2611 	if (ret_val)
2612 		goto out;
2613 
2614 	if (nvm->type != e1000_nvm_flash_sw)
2615 		goto out;
2616 
2617 	nvm->ops.acquire(hw);
2618 
2619 	/*
2620 	 * We're writing to the opposite bank so if we're on bank 1,
2621 	 * write to bank 0 etc.  We also need to erase the segment that
2622 	 * is going to be written
2623 	 */
2624 	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2625 	if (ret_val != E1000_SUCCESS) {
2626 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
2627 		bank = 0;
2628 	}
2629 
2630 	if (bank == 0) {
2631 		new_bank_offset = nvm->flash_bank_size;
2632 		old_bank_offset = 0;
2633 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
2634 		if (ret_val)
2635 			goto release;
2636 	} else {
2637 		old_bank_offset = nvm->flash_bank_size;
2638 		new_bank_offset = 0;
2639 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
2640 		if (ret_val)
2641 			goto release;
2642 	}
2643 
2644 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2645 		/*
2646 		 * Determine whether to write the value stored
2647 		 * in the other NVM bank or a modified value stored
2648 		 * in the shadow RAM
2649 		 */
2650 		if (dev_spec->shadow_ram[i].modified) {
2651 			data = dev_spec->shadow_ram[i].value;
2652 		} else {
2653 			ret_val = e1000_read_flash_word_ich8lan(hw, i +
2654 			                                        old_bank_offset,
2655 			                                        &data);
2656 			if (ret_val)
2657 				break;
2658 		}
2659 
2660 		/*
2661 		 * If the word is 0x13, then make sure the signature bits
2662 		 * (15:14) are 11b until the commit has completed.
2663 		 * This will allow us to write 10b which indicates the
2664 		 * signature is valid.  We want to do this after the write
2665 		 * has completed so that we don't mark the segment valid
2666 		 * while the write is still in progress
2667 		 */
2668 		if (i == E1000_ICH_NVM_SIG_WORD)
2669 			data |= E1000_ICH_NVM_SIG_MASK;
2670 
2671 		/* Convert offset to bytes. */
2672 		act_offset = (i + new_bank_offset) << 1;
2673 
2674 		usec_delay(100);
2675 		/* Write the bytes to the new bank. */
2676 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2677 		                                               act_offset,
2678 		                                               (u8)data);
2679 		if (ret_val)
2680 			break;
2681 
2682 		usec_delay(100);
2683 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2684 		                                          act_offset + 1,
2685 		                                          (u8)(data >> 8));
2686 		if (ret_val)
2687 			break;
2688 	}
2689 
2690 	/*
2691 	 * Don't bother writing the segment valid bits if sector
2692 	 * programming failed.
2693 	 */
2694 	if (ret_val) {
2695 		DEBUGOUT("Flash commit failed.\n");
2696 		goto release;
2697 	}
2698 
2699 	/*
2700 	 * Finally, validate the new segment by setting bits 15:14
2701 	 * to 10b in word 0x13.  This can be done without an erase
2702 	 * because these bits start out as 11b and we only need to
2703 	 * clear bit 14.
2704 	 */
2705 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
2706 	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
2707 	if (ret_val)
2708 		goto release;
2709 
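	/* Clear bit 14 so the signature bits (15:14) read 10b (valid). */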
2710 	data &= 0xBFFF;
2711 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2712 	                                               act_offset * 2 + 1,
2713 	                                               (u8)(data >> 8));
2714 	if (ret_val)
2715 		goto release;
2716 
2717 	/*
2718 	 * And invalidate the previously valid segment by setting
2719 	 * its signature word (0x13) high byte to 0.  This can be
2720 	 * done without an erase because erase sets all bits to 1's
2721 	 * and a write can only change 1's to 0's.
2722 	 */
2723 	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
2724 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
2725 	if (ret_val)
2726 		goto release;
2727 
2728 	/* Great!  Everything worked, we can now clear the cached entries. */
2729 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2730 		dev_spec->shadow_ram[i].modified = FALSE;
2731 		dev_spec->shadow_ram[i].value = 0xFFFF;
2732 	}
2733 
2734 release:
2735 	nvm->ops.release(hw);
2736 
2737 	/*
2738 	 * Reload the EEPROM, or else modifications will not appear
2739 	 * until after the next adapter reset.
2740 	 */
2741 	if (!ret_val) {
2742 		nvm->ops.reload(hw);
2743 		msec_delay(10);
2744 	}
2745 
2746 out:
2747 	if (ret_val)
2748 		DEBUGOUT1("NVM update error: %d\n", ret_val);
2749 
2750 	return ret_val;
2751 }
2752 
2753 /**
2754  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
2755  *  @hw: pointer to the HW structure
2756  *
2757  *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
2758  *  If this bit is 0, the EEPROM has been modified but the checksum was not
2759  *  calculated, in which case we need to calculate the checksum and set bit 6.
2760  **/
2761 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
2762 {
2763 	s32 ret_val = E1000_SUCCESS;
2764 	u16 data;
2765 
2766 	DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
2767 
2768 	/*
2769 	 * Read 0x19 and check bit 6.  If this bit is 0, the checksum
2770 	 * needs to be fixed.  This bit is an indication that the NVM
2771 	 * was prepared by OEM software and did not calculate the
2772 	 * checksum...a likely scenario.
2773 	 */
2774 	ret_val = hw->nvm.ops.read(hw, 0x19, 1, &data);
2775 	if (ret_val)
2776 		goto out;
2777 
2778 	if ((data & 0x40) == 0) {
2779 		data |= 0x40;
2780 		ret_val = hw->nvm.ops.write(hw, 0x19, 1, &data);
2781 		if (ret_val)
2782 			goto out;
2783 		ret_val = hw->nvm.ops.update(hw);
2784 		if (ret_val)
2785 			goto out;
2786 	}
2787 
2788 	ret_val = e1000_validate_nvm_checksum_generic(hw);
2789 
2790 out:
2791 	return ret_val;
2792 }
2793 
2794 /**
2795  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
2796  *  @hw: pointer to the HW structure
2797  *  @offset: The offset (in bytes) of the byte/word to write.
2798  *  @size: Size of data to write, 1=byte 2=word
2799  *  @data: The byte(s) to write to the NVM.
2800  *
2801  *  Writes one/two bytes to the NVM using the flash access registers.
2802  **/
2803 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2804                                           u8 size, u16 data)
2805 {
2806 	union ich8_hws_flash_status hsfsts;
2807 	union ich8_hws_flash_ctrl hsflctl;
2808 	u32 flash_linear_addr;
2809 	u32 flash_data = 0;
2810 	s32 ret_val = -E1000_ERR_NVM;
2811 	u8 count = 0;
2812 
2813 	DEBUGFUNC("e1000_write_ich8_data");
2814 
2815 	if (size < 1 || size > 2 || data > size * 0xff ||
2816 	    offset > ICH_FLASH_LINEAR_ADDR_MASK)
2817 		goto out;
2818 
2819 	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2820 	                    hw->nvm.flash_base_addr;
2821 
2822 	do {
2823 		usec_delay(1);
2824 		/* Steps */
2825 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
2826 		if (ret_val != E1000_SUCCESS)
2827 			break;
2828 
2829 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2830 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2831 		hsflctl.hsf_ctrl.fldbcount = size - 1;
2832 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
2833 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2834 
2835 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
2836 
2837 		if (size == 1)
2838 			flash_data = (u32)data & 0x00FF;
2839 		else
2840 			flash_data = (u32)data;
2841 
2842 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
2843 
2844 		/*
2845 		 * If FCERR is set, clear it and retry the whole sequence
2846 		 * a few more times; otherwise we are done.
2847 		 */
2848 		ret_val = e1000_flash_cycle_ich8lan(hw,
2849 		                               ICH_FLASH_WRITE_COMMAND_TIMEOUT);
2850 		if (ret_val == E1000_SUCCESS)
2851 			break;
2852 
2853 		/*
2854 		 * If we're here, then things are most likely
2855 		 * completely hosed, but if the error condition
2856 		 * is detected, it won't hurt to give it another
2857 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
2858 		 */
2859 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2860 		if (hsfsts.hsf_status.flcerr == 1)
2861 			/* Repeat for some time before giving up. */
2862 			continue;
2863 		if (hsfsts.hsf_status.flcdone == 0) {
2864 			DEBUGOUT("Timeout error - flash cycle "
2865 				 "did not complete.");
2866 			break;
2867 		}
2868 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2869 
2870 out:
2871 	return ret_val;
2872 }
2873 
2874 /**
2875  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
2876  *  @hw: pointer to the HW structure
2877  *  @offset: The offset of the byte to write.
2878  *  @data: The byte to write to the NVM.
2879  *
2880  *  Writes a single byte to the NVM using the flash access registers.
2881  **/
2882 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2883                                           u8 data)
2884 {
2885 	u16 word = (u16)data;
2886 
2887 	DEBUGFUNC("e1000_write_flash_byte_ich8lan");
2888 
2889 	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
2890 }
2891 
2892 /**
2893  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
2894  *  @hw: pointer to the HW structure
2895  *  @offset: The offset of the byte to write.
2896  *  @byte: The byte to write to the NVM.
2897  *
2898  *  Writes a single byte to the NVM using the flash access registers.
2899  *  Goes through a retry algorithm before giving up.
2900  **/
2901 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
2902                                                 u32 offset, u8 byte)
2903 {
2904 	s32 ret_val;
2905 	u16 program_retries;
2906 
2907 	DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
2908 
2909 	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2910 	if (ret_val == E1000_SUCCESS)
2911 		goto out;
2912 
2913 	for (program_retries = 0; program_retries < 100; program_retries++) {
2914 		DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
2915 		usec_delay(100);
2916 		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2917 		if (ret_val == E1000_SUCCESS)
2918 			break;
2919 	}
2920 	if (program_retries == 100) {
2921 		ret_val = -E1000_ERR_NVM;
2922 		goto out;
2923 	}
2924 
2925 out:
2926 	return ret_val;
2927 }
2928 
2929 /**
2930  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
2931  *  @hw: pointer to the HW structure
2932  *  @bank: 0 for first bank, 1 for second bank, etc.
2933  *
2934  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
2935  *  Bank N starts at 4096 * N + flash_reg_addr.
2936  **/
2937 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
2938 {
2939 	struct e1000_nvm_info *nvm = &hw->nvm;
2940 	union ich8_hws_flash_status hsfsts;
2941 	union ich8_hws_flash_ctrl hsflctl;
2942 	u32 flash_linear_addr;
2943 	/* bank size is in 16bit words - adjust to bytes */
2944 	u32 flash_bank_size = nvm->flash_bank_size * 2;
2945 	s32 ret_val = E1000_SUCCESS;
2946 	s32 count = 0;
2947 	s32 j, iteration, sector_size;
2948 
2949 	DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
2950 
2951 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2952 
2953 	/*
2954 	 * Determine HW Sector size: Read BERASE bits of hw flash status
2955 	 * register
2956 	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
2957 	 *     consecutive sectors.  The start index for the nth Hw sector
2958 	 *     can be calculated as = bank * 4096 + n * 256
2959 	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
2960 	 *     The start index for the nth Hw sector can be calculated
2961 	 *     as = bank * 4096
2962 	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
2963 	 *     (ich9 only, otherwise error condition)
2964 	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
2965 	 */
2966 	switch (hsfsts.hsf_status.berasesz) {
2967 	case 0:
2968 		/* Hw sector size 256 */
2969 		sector_size = ICH_FLASH_SEG_SIZE_256;
2970 		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
2971 		break;
2972 	case 1:
2973 		sector_size = ICH_FLASH_SEG_SIZE_4K;
2974 		iteration = 1;
2975 		break;
2976 	case 2:
2977 		sector_size = ICH_FLASH_SEG_SIZE_8K;
2978 		iteration = 1;
2979 		break;
2980 	case 3:
2981 		sector_size = ICH_FLASH_SEG_SIZE_64K;
2982 		iteration = 1;
2983 		break;
2984 	default:
2985 		ret_val = -E1000_ERR_NVM;
2986 		goto out;
2987 	}
2988 
2989 	/* Start with the base address, then add the sector offset. */
2990 	flash_linear_addr = hw->nvm.flash_base_addr;
2991 	flash_linear_addr += (bank) ? flash_bank_size : 0;
2992 
2993 	for (j = 0; j < iteration ; j++) {
2994 		do {
2995 			/* Steps */
2996 			ret_val = e1000_flash_cycle_init_ich8lan(hw);
2997 			if (ret_val)
2998 				goto out;
2999 
3000 			/*
3001 			 * Write a value 11 (block Erase) in Flash
3002 			 * Cycle field in hw flash control
3003 			 */
3004 			hsflctl.regval = E1000_READ_FLASH_REG16(hw,
3005 			                                      ICH_FLASH_HSFCTL);
3006 			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3007 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3008 			                        hsflctl.regval);
3009 
3010 			/*
3011 			 * Write the last 24 bits of the sector's start
3012 			 * address (bank base + j * sector_size) into the
3013 			 * Flash Linear Address field in Flash Address.
3014 			 * Compute the address fresh each pass so that
3015 			 * retries do not accumulate the sector offset.
 			 */
3016 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3017 			                      flash_linear_addr +
 			                      (j * sector_size));
3018 
3019 			ret_val = e1000_flash_cycle_ich8lan(hw,
3020 			                       ICH_FLASH_ERASE_COMMAND_TIMEOUT);
3021 			if (ret_val == E1000_SUCCESS)
3022 				break;
3023 
3024 			/*
3025 			 * If FCERR is set, clear it and retry the
3026 			 * whole sequence a few more times;
3027 			 * otherwise we are done.
3028 			 */
3029 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3030 						      ICH_FLASH_HSFSTS);
3031 			if (hsfsts.hsf_status.flcerr == 1)
3032 				/* repeat for some time before giving up */
3033 				continue;
3034 			else if (hsfsts.hsf_status.flcdone == 0)
3035 				goto out;
3036 		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3037 	}
3038 
3039 out:
3040 	return ret_val;
3041 }
3042 
3043 /**
3044  *  e1000_valid_led_default_ich8lan - Set the default LED settings
3045  *  @hw: pointer to the HW structure
3046  *  @data: Pointer to the LED settings
3047  *
3048  *  Reads the LED default settings from the NVM into data.  If the NVM LED
3049  *  setting is all 0's or all F's, set the LED default to a valid LED
3050  *  default setting.
3051  **/
3052 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3053 {
3054 	s32 ret_val;
3055 
3056 	DEBUGFUNC("e1000_valid_led_default_ich8lan");
3057 
3058 	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3059 	if (ret_val) {
3060 		DEBUGOUT("NVM Read Error\n");
3061 		goto out;
3062 	}
3063 
3064 	if (*data == ID_LED_RESERVED_0000 ||
3065 	    *data == ID_LED_RESERVED_FFFF)
3066 		*data = ID_LED_DEFAULT_ICH8LAN;
3067 
3068 out:
3069 	return ret_val;
3070 }
3071 
3072 /**
3073  *  e1000_id_led_init_pchlan - store LED configurations
3074  *  @hw: pointer to the HW structure
3075  *
3076  *  PCH does not control LEDs via the LEDCTL register; rather, it uses
3077  *  the PHY LED configuration register.
3078  *
3079  *  PCH also does not have an "always on" or "always off" mode, which
3080  *  complicates the ID feature.  Instead of using the "on" mode to indicate
3081  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
3082  *  use "link_up" mode.  The LEDs will still ID on request if there is no
3083  *  link based on logic in e1000_led_[on|off]_pchlan().
3084  **/
3085 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
3086 {
3087 	struct e1000_mac_info *mac = &hw->mac;
3088 	s32 ret_val;
3089 	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
3090 	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
3091 	u16 data, i, temp, shift;
3092 
3093 	DEBUGFUNC("e1000_id_led_init_pchlan");
3094 
3095 	/* Get default ID LED modes */
3096 	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
3097 	if (ret_val)
3098 		goto out;
3099 
3100 	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
3101 	mac->ledctl_mode1 = mac->ledctl_default;
3102 	mac->ledctl_mode2 = mac->ledctl_default;
3103 
3104 	for (i = 0; i < 4; i++) {
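		/*
		 * Each LED has a 4-bit default mode in the NVM word; the PHY
		 * LED configuration packs the LEDs into 5-bit fields, hence
		 * the i * 5 shift.
		 */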
3105 		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
3106 		shift = (i * 5);
3107 		switch (temp) {
3108 		case ID_LED_ON1_DEF2:
3109 		case ID_LED_ON1_ON2:
3110 		case ID_LED_ON1_OFF2:
3111 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3112 			mac->ledctl_mode1 |= (ledctl_on << shift);
3113 			break;
3114 		case ID_LED_OFF1_DEF2:
3115 		case ID_LED_OFF1_ON2:
3116 		case ID_LED_OFF1_OFF2:
3117 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3118 			mac->ledctl_mode1 |= (ledctl_off << shift);
3119 			break;
3120 		default:
3121 			/* Do nothing */
3122 			break;
3123 		}
3124 		switch (temp) {
3125 		case ID_LED_DEF1_ON2:
3126 		case ID_LED_ON1_ON2:
3127 		case ID_LED_OFF1_ON2:
3128 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3129 			mac->ledctl_mode2 |= (ledctl_on << shift);
3130 			break;
3131 		case ID_LED_DEF1_OFF2:
3132 		case ID_LED_ON1_OFF2:
3133 		case ID_LED_OFF1_OFF2:
3134 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3135 			mac->ledctl_mode2 |= (ledctl_off << shift);
3136 			break;
3137 		default:
3138 			/* Do nothing */
3139 			break;
3140 		}
3141 	}
3142 
3143 out:
3144 	return ret_val;
3145 }
3146 
3147 /**
3148  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
3149  *  @hw: pointer to the HW structure
3150  *
3151  *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
3152  *  register, so the bus width is hard coded.
3153  **/
3154 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3155 {
3156 	struct e1000_bus_info *bus = &hw->bus;
3157 	s32 ret_val;
3158 
3159 	DEBUGFUNC("e1000_get_bus_info_ich8lan");
3160 
3161 	ret_val = e1000_get_bus_info_pcie_generic(hw);
3162 
3163 	/*
3164 	 * ICH devices are "PCI Express"-ish.  They have
3165 	 * a configuration space, but do not contain
3166 	 * PCI Express Capability registers, so bus width
3167 	 * must be hardcoded.
3168 	 */
3169 	if (bus->width == e1000_bus_width_unknown)
3170 		bus->width = e1000_bus_width_pcie_x1;
3171 
3172 	return ret_val;
3173 }
3174 
3175 /**
3176  *  e1000_reset_hw_ich8lan - Reset the hardware
3177  *  @hw: pointer to the HW structure
3178  *
3179  *  Does a full reset of the hardware which includes a reset of the PHY and
3180  *  MAC.
3181  **/
3182 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3183 {
3184 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3185 	u16 reg;
3186 	u32 ctrl, icr, kab;
3187 	s32 ret_val;
3188 
3189 	DEBUGFUNC("e1000_reset_hw_ich8lan");
3190 
3191 	/*
3192 	 * Prevent the PCI-E bus from sticking if there is no TLP connection
3193 	 * on the last TLP read/write transaction when MAC is reset.
3194 	 */
3195 	ret_val = e1000_disable_pcie_master_generic(hw);
3196 	if (ret_val)
3197 		DEBUGOUT("PCI-E Master disable polling has failed.\n");
3198 
3199 	DEBUGOUT("Masking off all interrupts\n");
3200 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3201 
3202 	/*
3203 	 * Disable the Transmit and Receive units.  Then delay to allow
3204 	 * any pending transactions to complete before we hit the MAC
3205 	 * with the global reset.
3206 	 */
3207 	E1000_WRITE_REG(hw, E1000_RCTL, 0);
3208 	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
3209 	E1000_WRITE_FLUSH(hw);
3210 
3211 	msec_delay(10);
3212 
3213 	/* Workaround for ICH8 bit corruption issue in FIFO memory */
3214 	if (hw->mac.type == e1000_ich8lan) {
3215 		/* Set Tx and Rx buffer allocation to 8k apiece. */
3216 		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
3217 		/* Set Packet Buffer Size to 16k. */
3218 		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
3219 	}
3220 
3221 	if (hw->mac.type == e1000_pchlan) {
3222 		/* Save the NVM K1 bit setting */
3223 		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg);
3224 		if (ret_val)
3225 			return ret_val;
3226 
3227 		if (reg & E1000_NVM_K1_ENABLE)
3228 			dev_spec->nvm_k1_enabled = TRUE;
3229 		else
3230 			dev_spec->nvm_k1_enabled = FALSE;
3231 	}
3232 
3233 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
3234 
3235 	if (!hw->phy.ops.check_reset_block(hw)) {
3236 		/*
3237 		 * Full-chip reset requires MAC and PHY reset at the same
3238 		 * time to make sure the interface between MAC and the
3239 		 * external PHY is reset.
3240 		 */
3241 		ctrl |= E1000_CTRL_PHY_RST;
3242 
3243 		/*
3244 		 * Gate automatic PHY configuration by hardware on
3245 		 * non-managed 82579
3246 		 */
3247 		if ((hw->mac.type == e1000_pch2lan) &&
3248 		    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3249 			e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3250 	}
3251 	ret_val = e1000_acquire_swflag_ich8lan(hw);
3252 	DEBUGOUT("Issuing a global reset to ich8lan\n");
3253 	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
3254 	msec_delay(20);
3255 
3256 	if (!ret_val)
3257 		e1000_release_swflag_ich8lan(hw);
3258 
3259 	if (ctrl & E1000_CTRL_PHY_RST) {
3260 		ret_val = hw->phy.ops.get_cfg_done(hw);
3261 		if (ret_val)
3262 			goto out;
3263 
3264 		ret_val = e1000_post_phy_reset_ich8lan(hw);
3265 		if (ret_val)
3266 			goto out;
3267 	}
3268 
3269 	/*
3270 	 * For PCH, this write will make sure that any noise
3271 	 * will be detected as a CRC error and be dropped rather than show up
3272 	 * as a bad packet to the DMA engine.
3273 	 */
3274 	if (hw->mac.type == e1000_pchlan)
3275 		E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
3276 
3277 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3278 	icr = E1000_READ_REG(hw, E1000_ICR);
3279 
3280 	kab = E1000_READ_REG(hw, E1000_KABGTXD);
3281 	kab |= E1000_KABGTXD_BGSQLBIAS;
3282 	E1000_WRITE_REG(hw, E1000_KABGTXD, kab);
3283 
3284 out:
3285 	return ret_val;
3286 }
3287 
3288 /**
3289  *  e1000_init_hw_ich8lan - Initialize the hardware
3290  *  @hw: pointer to the HW structure
3291  *
3292  *  Prepares the hardware for transmit and receive by doing the following:
3293  *   - initialize hardware bits
3294  *   - initialize LED identification
3295  *   - setup receive address registers
3296  *   - setup flow control
3297  *   - setup transmit descriptors
3298  *   - clear statistics
3299  **/
3300 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3301 {
3302 	struct e1000_mac_info *mac = &hw->mac;
3303 	u32 ctrl_ext, txdctl, snoop;
3304 	s32 ret_val;
3305 	u16 i;
3306 
3307 	DEBUGFUNC("e1000_init_hw_ich8lan");
3308 
3309 	e1000_initialize_hw_bits_ich8lan(hw);
3310 
3311 	/* Initialize identification LED */
3312 	ret_val = mac->ops.id_led_init(hw);
3313 	if (ret_val)
3314 		DEBUGOUT("Error initializing identification LED\n");
3315 		/* This is not fatal and we should not stop init due to this */
3316 
3317 	/* Setup the receive address. */
3318 	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
3319 
3320 	/* Zero out the Multicast HASH table */
3321 	DEBUGOUT("Zeroing the MTA\n");
3322 	for (i = 0; i < mac->mta_reg_count; i++)
3323 		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
3324 
3325 	/*
3326 	 * The 82578 Rx buffer will stall if wakeup is enabled in host and
3327 	 * the ME.  Reading the BM_WUC register will clear the host wakeup bit.
3328 	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
3329 	 */
3330 	if (hw->phy.type == e1000_phy_82578) {
3331 		hw->phy.ops.read_reg(hw, BM_WUC, &i);
3332 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
3333 		if (ret_val)
3334 			return ret_val;
3335 	}
3336 
3337 	/* Setup link and flow control */
3338 	ret_val = mac->ops.setup_link(hw);
3339 
3340 	/* Set the transmit descriptor write-back policy for both queues */
3341 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
3342 	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3343 		 E1000_TXDCTL_FULL_TX_DESC_WB;
3344 	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3345 	         E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3346 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
3347 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
3348 	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3349 		 E1000_TXDCTL_FULL_TX_DESC_WB;
3350 	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3351 	         E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3352 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
3353 
3354 	/*
3355 	 * ICH8 has opposite polarity of no_snoop bits.
3356 	 * By default, we should use snoop behavior.
3357 	 */
3358 	if (mac->type == e1000_ich8lan)
3359 		snoop = PCIE_ICH8_SNOOP_ALL;
3360 	else
3361 		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
3362 	e1000_set_pcie_no_snoop_generic(hw, snoop);
3363 
3364 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
3365 	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
3366 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
3367 
3368 	/*
3369 	 * Clear all of the statistics registers (clear on read).  It is
3370 	 * important that we do this after we have tried to establish link
3371 	 * because the symbol error count will increment wildly if there
3372 	 * is no link.
3373 	 */
3374 	e1000_clear_hw_cntrs_ich8lan(hw);
3375 
3376 	return ret_val;
3377 }
3378 /**
3379  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
3380  *  @hw: pointer to the HW structure
3381  *
3382  *  Sets/clears the hardware bits necessary for correctly setting up the
3383  *  hardware for transmit and receive.
3384  **/
3385 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3386 {
3387 	u32 reg;
3388 
3389 	DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
3390 
3391 	/* Extended Device Control */
3392 	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
3393 	reg |= (1 << 22);
3394 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
3395 	if (hw->mac.type >= e1000_pchlan)
3396 		reg |= E1000_CTRL_EXT_PHYPDEN;
3397 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
3398 
3399 	/* Transmit Descriptor Control 0 */
3400 	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
3401 	reg |= (1 << 22);
3402 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
3403 
3404 	/* Transmit Descriptor Control 1 */
3405 	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
3406 	reg |= (1 << 22);
3407 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
3408 
3409 	/* Transmit Arbitration Control 0 */
3410 	reg = E1000_READ_REG(hw, E1000_TARC(0));
3411 	if (hw->mac.type == e1000_ich8lan)
3412 		reg |= (1 << 28) | (1 << 29);
3413 	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
3414 	E1000_WRITE_REG(hw, E1000_TARC(0), reg);
3415 
3416 	/* Transmit Arbitration Control 1 */
3417 	reg = E1000_READ_REG(hw, E1000_TARC(1));
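	/*
	 * Bit 28 is cleared when multiple simultaneous transmit requests
	 * (MULR) are enabled and set otherwise.
	 */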
3418 	if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
3419 		reg &= ~(1 << 28);
3420 	else
3421 		reg |= (1 << 28);
3422 	reg |= (1 << 24) | (1 << 26) | (1 << 30);
3423 	E1000_WRITE_REG(hw, E1000_TARC(1), reg);
3424 
3425 	/* Device Status */
3426 	if (hw->mac.type == e1000_ich8lan) {
3427 		reg = E1000_READ_REG(hw, E1000_STATUS);
3428 		reg &= ~(1U << 31);
3429 		E1000_WRITE_REG(hw, E1000_STATUS, reg);
3430 	}
3431 
3432 	/*
3433 	 * Work around a descriptor data corruption issue seen with NFSv2 UDP
3434 	 * traffic by disabling the NFS filtering capability.
3435 	 */
3436 	reg = E1000_READ_REG(hw, E1000_RFCTL);
3437 	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
3438 	E1000_WRITE_REG(hw, E1000_RFCTL, reg);
3439 
3440 	return;
3441 }
3442 
3443 /**
3444  *  e1000_setup_link_ich8lan - Setup flow control and link settings
3445  *  @hw: pointer to the HW structure
3446  *
3447  *  Determines which flow control settings to use, then configures flow
3448  *  control.  Calls the appropriate media-specific link configuration
3449  *  function.  Assuming the adapter has a valid link partner, a valid link
3450  *  should be established.  Assumes the hardware has previously been reset
3451  *  and the transmitter and receiver are not enabled.
3452  **/
3453 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3454 {
3455 	s32 ret_val = E1000_SUCCESS;
3456 
3457 	DEBUGFUNC("e1000_setup_link_ich8lan");
3458 
3459 	if (hw->phy.ops.check_reset_block(hw))
3460 		goto out;
3461 
3462 	/*
3463 	 * ICH parts do not have a word in the NVM to determine
3464 	 * the default flow control setting, so we explicitly
3465 	 * set it to full.
3466 	 */
3467 	if (hw->fc.requested_mode == e1000_fc_default)
3468 		hw->fc.requested_mode = e1000_fc_full;
3469 
3470 	/*
3471 	 * Save off the requested flow control mode for use later.  Depending
3472 	 * on the link partner's capabilities, we may or may not use this mode.
3473 	 */
3474 	hw->fc.current_mode = hw->fc.requested_mode;
3475 
3476 	DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
3477 		hw->fc.current_mode);
3478 
3479 	/* Continue to configure the copper link. */
3480 	ret_val = hw->mac.ops.setup_physical_interface(hw);
3481 	if (ret_val)
3482 		goto out;
3483 
3484 	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
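	/*
	 * For 82577/82578/82579 PHYs, also program the flow control refresh
	 * timer and copy the pause time into the PHY.
	 */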
3485 	if ((hw->phy.type == e1000_phy_82578) ||
3486 	    (hw->phy.type == e1000_phy_82579) ||
3487 	    (hw->phy.type == e1000_phy_82577)) {
3488 		E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
3489 
3490 		ret_val = hw->phy.ops.write_reg(hw,
3491 		                             PHY_REG(BM_PORT_CTRL_PAGE, 27),
3492 		                             hw->fc.pause_time);
3493 		if (ret_val)
3494 			goto out;
3495 	}
3496 
3497 	ret_val = e1000_set_fc_watermarks_generic(hw);
3498 
3499 out:
3500 	return ret_val;
3501 }
3502 
3503 /**
3504  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
3505  *  @hw: pointer to the HW structure
3506  *
3507  *  Configures the Kumeran interface to the PHY to wait the appropriate time
3508  *  when polling the PHY, then calls the generic setup_copper_link to finish
3509  *  configuring the copper link.
3510  **/
3511 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3512 {
3513 	u32 ctrl;
3514 	s32 ret_val;
3515 	u16 reg_data;
3516 
3517 	DEBUGFUNC("e1000_setup_copper_link_ich8lan");
3518 
3519 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
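	/*
	 * Force the MAC's link-up state; speed and duplex are taken from the
	 * PHY rather than forced by the MAC.
	 */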
3520 	ctrl |= E1000_CTRL_SLU;
3521 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
3522 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3523 
3524 	/*
3525 	 * Set the MAC to wait the maximum time between each iteration
3526 	 * and increase the max iterations when polling the PHY;
3527 	 * this fixes erroneous timeouts at 10Mbps.
3528 	 */
3529 	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
3530 	                                       0xFFFF);
3531 	if (ret_val)
3532 		goto out;
3533 	ret_val = e1000_read_kmrn_reg_generic(hw,
3534 	                                      E1000_KMRNCTRLSTA_INBAND_PARAM,
3535 	                                      &reg_data);
3536 	if (ret_val)
3537 		goto out;
3538 	reg_data |= 0x3F;
3539 	ret_val = e1000_write_kmrn_reg_generic(hw,
3540 	                                       E1000_KMRNCTRLSTA_INBAND_PARAM,
3541 	                                       reg_data);
3542 	if (ret_val)
3543 		goto out;
3544 
3545 	switch (hw->phy.type) {
3546 	case e1000_phy_igp_3:
3547 		ret_val = e1000_copper_link_setup_igp(hw);
3548 		if (ret_val)
3549 			goto out;
3550 		break;
3551 	case e1000_phy_bm:
3552 	case e1000_phy_82578:
3553 		ret_val = e1000_copper_link_setup_m88(hw);
3554 		if (ret_val)
3555 			goto out;
3556 		break;
3557 	case e1000_phy_82577:
3558 	case e1000_phy_82579:
3559 		ret_val = e1000_copper_link_setup_82577(hw);
3560 		if (ret_val)
3561 			goto out;
3562 		break;
3563 	case e1000_phy_ife:
3564 		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
3565 		                               &reg_data);
3566 		if (ret_val)
3567 			goto out;
3568 
3569 		reg_data &= ~IFE_PMC_AUTO_MDIX;
3570 
3571 		switch (hw->phy.mdix) {
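		/* mdix: 1 = force MDI, 2 = force MDI-X, 0 or other = auto */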
3572 		case 1:
3573 			reg_data &= ~IFE_PMC_FORCE_MDIX;
3574 			break;
3575 		case 2:
3576 			reg_data |= IFE_PMC_FORCE_MDIX;
3577 			break;
3578 		case 0:
3579 		default:
3580 			reg_data |= IFE_PMC_AUTO_MDIX;
3581 			break;
3582 		}
3583 		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
3584 		                                reg_data);
3585 		if (ret_val)
3586 			goto out;
3587 		break;
3588 	default:
3589 		break;
3590 	}
3591 	ret_val = e1000_setup_copper_link_generic(hw);
3592 
3593 out:
3594 	return ret_val;
3595 }
3596 
3597 /**
3598  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
3599  *  @hw: pointer to the HW structure
3600  *  @speed: pointer to store current link speed
3601  *  @duplex: pointer to store the current link duplex
3602  *
3603  *  Calls the generic get_speed_and_duplex to retrieve the current link
3604  *  information and then calls the Kumeran lock loss workaround for links at
3605  *  gigabit speeds.
3606  **/
3607 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
3608                                           u16 *duplex)
3609 {
3610 	s32 ret_val;
3611 
3612 	DEBUGFUNC("e1000_get_link_up_info_ich8lan");
3613 
3614 	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
3615 	if (ret_val)
3616 		goto out;
3617 
3618 	if ((hw->mac.type == e1000_ich8lan) &&
3619 	    (hw->phy.type == e1000_phy_igp_3) &&
3620 	    (*speed == SPEED_1000)) {
3621 		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
3622 	}
3623 
3624 out:
3625 	return ret_val;
3626 }
3627 
3628 /**
3629  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
3630  *  @hw: pointer to the HW structure
3631  *
3632  *  Work-around for 82566 Kumeran PCS lock loss:
3633  *  On link status change (e.g. PCI reset, speed change), when link is up and
3634  *  speed is gigabit:
3635  *    0) if workaround is optionally disabled do nothing
3636  *    1) wait 1ms for Kumeran link to come up
3637  *    2) check Kumeran Diagnostic register PCS lock loss bit
3638  *    3) if not set the link is locked (all is good), otherwise...
3639  *    4) reset the PHY
3640  *    5) repeat up to 10 times
3641  *  Note: this is only called for IGP3 copper when speed is 1Gb/s.
3642  **/
3643 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
3644 {
3645 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3646 	u32 phy_ctrl;
3647 	s32 ret_val = E1000_SUCCESS;
3648 	u16 i, data;
3649 	bool link;
3650 
3651 	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
3652 
3653 	if (!(dev_spec->kmrn_lock_loss_workaround_enabled))
3654 		goto out;
3655 
3656 	/*
3657 	 * Make sure link is up before proceeding.  If not, just return.
3658 	 * Attempting this while the link is negotiating has been seen to
3659 	 * foul up link stability.
3660 	 */
3661 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
3662 	if (!link) {
3663 		ret_val = E1000_SUCCESS;
3664 		goto out;
3665 	}
3666 
3667 	for (i = 0; i < 10; i++) {
3668 		/* read once to clear */
3669 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3670 		if (ret_val)
3671 			goto out;
3672 		/* and again to get new status */
3673 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3674 		if (ret_val)
3675 			goto out;
3676 
3677 		/* check for PCS lock */
3678 		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) {
3679 			ret_val = E1000_SUCCESS;
3680 			goto out;
3681 		}
3682 
3683 		/* Issue PHY reset */
3684 		hw->phy.ops.reset(hw);
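		/* Give the PHY time to complete its reset. */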
3685 		msec_delay_irq(5);
3686 	}
3687 	/* Disable GigE link negotiation */
3688 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3689 	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
3690 	             E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3691 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3692 
3693 	/*
3694 	 * Call gig speed drop workaround on Gig disable before accessing
3695 	 * any PHY registers
3696 	 */
3697 	e1000_gig_downshift_workaround_ich8lan(hw);
3698 
3699 	/* unable to acquire PCS lock */
3700 	ret_val = -E1000_ERR_PHY;
3701 
3702 out:
3703 	return ret_val;
3704 }
3705 
3706 /**
3707  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
3708  *  @hw: pointer to the HW structure
3709  *  @state: boolean value used to set the current Kumeran workaround state
3710  *
3711  *  If ICH8, set the current Kumeran workaround state (enabled = TRUE,
3712  *  disabled = FALSE).
3713  **/
3714 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
3715                                                  bool state)
3716 {
3717 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3718 
3719 	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
3720 
3721 	if (hw->mac.type != e1000_ich8lan) {
3722 		DEBUGOUT("Workaround applies to ICH8 only.\n");
3723 		return;
3724 	}
3725 
3726 	dev_spec->kmrn_lock_loss_workaround_enabled = state;
3727 
3728 	return;
3729 }
3730 
3731 /**
3732  *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
3733  *  @hw: pointer to the HW structure
3734  *
3735  *  Workaround for 82566 power-down on D3 entry:
3736  *    1) disable gigabit link
3737  *    2) write VR power-down enable
3738  *    3) read it back
3739  *  Continue if successful, else issue LCD reset and repeat
3740  **/
3741 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
3742 {
3743 	u32 reg;
3744 	u16 data;
3745 	u8  retry = 0;
3746 
3747 	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
3748 
3749 	if (hw->phy.type != e1000_phy_igp_3)
3750 		goto out;
3751 
3752 	/* Try the workaround twice (if needed) */
3753 	do {
3754 		/* Disable link */
3755 		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
3756 		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
3757 		        E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3758 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
3759 
3760 		/*
3761 		 * Call gig speed drop workaround on Gig disable before
3762 		 * accessing any PHY registers
3763 		 */
3764 		if (hw->mac.type == e1000_ich8lan)
3765 			e1000_gig_downshift_workaround_ich8lan(hw);
3766 
3767 		/* Write VR power-down enable */
3768 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3769 		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3770 		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
3771 		                   data | IGP3_VR_CTRL_MODE_SHUTDOWN);
3772 
3773 		/* Read it back and test */
3774 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3775 		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3776 		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
3777 			break;
3778 
3779 		/* Issue PHY reset and repeat at most one more time */
3780 		reg = E1000_READ_REG(hw, E1000_CTRL);
3781 		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
3782 		retry++;
3783 	} while (retry);
3784 
3785 out:
3786 	return;
3787 }
3788 
3789 /**
3790  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
3791  *  @hw: pointer to the HW structure
3792  *
3793  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
3794  *  LPLU, Gig disable, MDIC PHY reset):
3795  *    1) Set Kumeran Near-end loopback
3796  *    2) Clear Kumeran Near-end loopback
3797  *  Should only be called for ICH8[M] devices with the IGP_3 PHY.
3798  **/
3799 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3800 {
3801 	s32 ret_val = E1000_SUCCESS;
3802 	u16 reg_data;
3803 
3804 	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
3805 
3806 	if ((hw->mac.type != e1000_ich8lan) ||
3807 	    (hw->phy.type != e1000_phy_igp_3))
3808 		goto out;
3809 
3810 	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
3811 	                                      &reg_data);
3812 	if (ret_val)
3813 		goto out;
3814 	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
3815 	ret_val = e1000_write_kmrn_reg_generic(hw,
3816 	                                       E1000_KMRNCTRLSTA_DIAG_OFFSET,
3817 	                                       reg_data);
3818 	if (ret_val)
3819 		goto out;
3820 	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
3821 	ret_val = e1000_write_kmrn_reg_generic(hw,
3822 	                                       E1000_KMRNCTRLSTA_DIAG_OFFSET,
3823 	                                       reg_data);
3824 out:
3825 	return;
3826 }
3827 
3828 /**
3829  *  e1000_disable_gig_wol_ich8lan - disable gig during WoL
3830  *  @hw: pointer to the HW structure
3831  *
3832  *  During S0 to Sx transition, it is possible the link remains at gig
3833  *  instead of negotiating to a lower speed.  Before going to Sx, set
3834  *  'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
3835  *  to a lower speed.
3836  *
3837  *  Should only be called for applicable parts.
3838  **/
3839 void e1000_disable_gig_wol_ich8lan(struct e1000_hw *hw)
3840 {
3841 	u32 phy_ctrl;
3842 	s32 ret_val;
3843 
3844 	DEBUGFUNC("e1000_disable_gig_wol_ich8lan");
3845 
3846 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3847 	phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
3848 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3849 
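	/*
	 * For PCH and newer parts, also configure the OEM bits for the
	 * non-D0a state and re-write the SMBus address while holding the
	 * PHY access semaphore.
	 */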
3850 	if (hw->mac.type >= e1000_pchlan) {
3851 		e1000_oem_bits_config_ich8lan(hw, FALSE);
3852 		ret_val = hw->phy.ops.acquire(hw);
3853 		if (ret_val)
3854 			return;
3855 		e1000_write_smbus_addr(hw);
3856 		hw->phy.ops.release(hw);
3857 	}
3858 
3859 	return;
3860 }
3861 
3862 /**
3863  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
3864  *  @hw: pointer to the HW structure
3865  *
3866  *  Return the LED back to the default configuration.
3867  **/
3868 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
3869 {
3870 	DEBUGFUNC("e1000_cleanup_led_ich8lan");
3871 
3872 	if (hw->phy.type == e1000_phy_ife)
3873 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3874 		                             0);
3875 
3876 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
3877 	return E1000_SUCCESS;
3878 }
3879 
3880 /**
3881  *  e1000_led_on_ich8lan - Turn LEDs on
3882  *  @hw: pointer to the HW structure
3883  *
3884  *  Turn on the LEDs.
3885  **/
3886 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
3887 {
3888 	DEBUGFUNC("e1000_led_on_ich8lan");
3889 
3890 	if (hw->phy.type == e1000_phy_ife)
3891 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3892 		                (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
3893 
3894 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
3895 	return E1000_SUCCESS;
3896 }
3897 
3898 /**
3899  *  e1000_led_off_ich8lan - Turn LEDs off
3900  *  @hw: pointer to the HW structure
3901  *
3902  *  Turn off the LEDs.
3903  **/
3904 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
3905 {
3906 	DEBUGFUNC("e1000_led_off_ich8lan");
3907 
3908 	if (hw->phy.type == e1000_phy_ife)
3909 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3910 		               (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
3911 
3912 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
3913 	return E1000_SUCCESS;
3914 }
3915 
3916 /**
3917  *  e1000_setup_led_pchlan - Configures SW controllable LED
3918  *  @hw: pointer to the HW structure
3919  *
3920  *  This prepares the SW controllable LED for use.
3921  **/
3922 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
3923 {
3924 	DEBUGFUNC("e1000_setup_led_pchlan");
3925 
3926 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
3927 					(u16)hw->mac.ledctl_mode1);
3928 }
3929 
3930 /**
3931  *  e1000_cleanup_led_pchlan - Restore the default LED operation
3932  *  @hw: pointer to the HW structure
3933  *
3934  *  Return the LED back to the default configuration.
3935  **/
3936 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
3937 {
3938 	DEBUGFUNC("e1000_cleanup_led_pchlan");
3939 
3940 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
3941 					(u16)hw->mac.ledctl_default);
3942 }
3943 
3944 /**
3945  *  e1000_led_on_pchlan - Turn LEDs on
3946  *  @hw: pointer to the HW structure
3947  *
3948  *  Turn on the LEDs.
3949  **/
3950 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
3951 {
3952 	u16 data = (u16)hw->mac.ledctl_mode2;
3953 	u32 i, led;
3954 
3955 	DEBUGFUNC("e1000_led_on_pchlan");
3956 
3957 	/*
3958 	 * If no link, then turn LED on by setting the invert bit
3959 	 * for each LED whose mode is "link_up" in ledctl_mode2.
3960 	 */
3961 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
3962 		for (i = 0; i < 3; i++) {
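			/*
			 * Each LED's configuration occupies a 5-bit field in
			 * HV_LED_CONFIG.
			 */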
3963 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
3964 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
3965 			    E1000_LEDCTL_MODE_LINK_UP)
3966 				continue;
3967 			if (led & E1000_PHY_LED0_IVRT)
3968 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
3969 			else
3970 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
3971 		}
3972 	}
3973 
3974 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
3975 }
3976 
3977 /**
3978  *  e1000_led_off_pchlan - Turn LEDs off
3979  *  @hw: pointer to the HW structure
3980  *
3981  *  Turn off the LEDs.
3982  **/
3983 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
3984 {
3985 	u16 data = (u16)hw->mac.ledctl_mode1;
3986 	u32 i, led;
3987 
3988 	DEBUGFUNC("e1000_led_off_pchlan");
3989 
3990 	/*
3991 	 * If no link, then turn LED off by clearing the invert bit
3992 	 * for each LED whose mode is "link_up" in ledctl_mode1.
3993 	 */
3994 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
3995 		for (i = 0; i < 3; i++) {
3996 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
3997 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
3998 			    E1000_LEDCTL_MODE_LINK_UP)
3999 				continue;
4000 			if (led & E1000_PHY_LED0_IVRT)
4001 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4002 			else
4003 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
4004 		}
4005 	}
4006 
4007 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4008 }
4009 
4010 /**
4011  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
4012  *  @hw: pointer to the HW structure
4013  *
4014  *  Read appropriate register for the config done bit for completion status
4015  *  and configure the PHY through s/w for EEPROM-less parts.
4016  *
4017  *  NOTE: some EEPROM-less silicon will fail trying to read the config done
4018  *  bit, so only an error is logged and initialization continues.  If we were
4019  *  to return with an error, EEPROM-less silicon could not be reset or change
4020  *  link.
4021  **/
4022 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
4023 {
4024 	s32 ret_val = E1000_SUCCESS;
4025 	u32 bank = 0;
4026 	u32 status;
4027 
4028 	DEBUGFUNC("e1000_get_cfg_done_ich8lan");
4029 
4030 	e1000_get_cfg_done_generic(hw);
4031 
4032 	/* Wait for indication from h/w that it has completed basic config */
4033 	if (hw->mac.type >= e1000_ich10lan) {
4034 		e1000_lan_init_done_ich8lan(hw);
4035 	} else {
4036 		ret_val = e1000_get_auto_rd_done_generic(hw);
4037 		if (ret_val) {
4038 			/*
4039 			 * When auto config read does not complete, do not
4040 			 * return with an error. This can happen in situations
4041 			 * where there is no eeprom and prevents getting link.
4042 			 */
4043 			DEBUGOUT("Auto Read Done did not complete\n");
4044 			ret_val = E1000_SUCCESS;
4045 		}
4046 	}
4047 
4048 	/* Clear PHY Reset Asserted bit */
4049 	status = E1000_READ_REG(hw, E1000_STATUS);
4050 	if (status & E1000_STATUS_PHYRA)
4051 		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
4052 	else
4053 		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
4054 
4055 	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
4056 	if (hw->mac.type <= e1000_ich9lan) {
4057 		if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
4058 		    (hw->phy.type == e1000_phy_igp_3)) {
4059 			e1000_phy_init_script_igp3(hw);
4060 		}
4061 	} else {
4062 		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
4063 			/* Maybe we should do a basic PHY config */
4064 			DEBUGOUT("EEPROM not present\n");
4065 			ret_val = -E1000_ERR_CONFIG;
4066 		}
4067 	}
4068 
4069 	return ret_val;
4070 }
4071 
4072 /**
4073  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
4074  * @hw: pointer to the HW structure
4075  *
4076  * In the case of a PHY power down to save power, to turn off link during a
4077  * driver unload, or when wake on LAN is not enabled, remove the link.
4078  **/
4079 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
4080 {
4081 	/* If the management interface is not enabled, then power down */
4082 	if (!(hw->mac.ops.check_mng_mode(hw) ||
4083 	      hw->phy.ops.check_reset_block(hw)))
4084 		e1000_power_down_phy_copper(hw);
4085 
4086 	return;
4087 }
4088 
4089 /**
4090  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
4091  *  @hw: pointer to the HW structure
4092  *
4093  *  Clears hardware counters specific to the silicon family and calls
4094  *  clear_hw_cntrs_generic to clear all general purpose counters.
4095  **/
4096 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
4097 {
4098 	u16 phy_data;
4099 
4100 	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
4101 
4102 	e1000_clear_hw_cntrs_base_generic(hw);
4103 
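	/*
	 * Read the remaining family-specific counters to clear them
	 * (the registers are clear-on-read).
	 */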
4104 	E1000_READ_REG(hw, E1000_ALGNERRC);
4105 	E1000_READ_REG(hw, E1000_RXERRC);
4106 	E1000_READ_REG(hw, E1000_TNCRS);
4107 	E1000_READ_REG(hw, E1000_CEXTERR);
4108 	E1000_READ_REG(hw, E1000_TSCTC);
4109 	E1000_READ_REG(hw, E1000_TSCTFC);
4110 
4111 	E1000_READ_REG(hw, E1000_MGTPRC);
4112 	E1000_READ_REG(hw, E1000_MGTPDC);
4113 	E1000_READ_REG(hw, E1000_MGTPTC);
4114 
4115 	E1000_READ_REG(hw, E1000_IAC);
4116 	E1000_READ_REG(hw, E1000_ICRXOC);
4117 
4118 	/* Clear PHY statistics registers */
4119 	if ((hw->phy.type == e1000_phy_82578) ||
4120 	    (hw->phy.type == e1000_phy_82579) ||
4121 	    (hw->phy.type == e1000_phy_82577)) {
4122 		hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data);
4123 		hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data);
4124 		hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data);
4125 		hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data);
4126 		hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data);
4127 		hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data);
4128 		hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data);
4129 		hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data);
4130 		hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data);
4131 		hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data);
4132 		hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data);
4133 		hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data);
4134 		hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data);
4135 		hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data);
4136 	}
4137 }
4138 
4139