xref: /dragonfly/sys/dev/netif/ig_hal/e1000_ich8lan.c (revision 89a89091)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2009, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD: $*/
34 
35 /*
36  * 82562G 10/100 Network Connection
37  * 82562G-2 10/100 Network Connection
38  * 82562GT 10/100 Network Connection
39  * 82562GT-2 10/100 Network Connection
40  * 82562V 10/100 Network Connection
41  * 82562V-2 10/100 Network Connection
42  * 82566DC-2 Gigabit Network Connection
43  * 82566DC Gigabit Network Connection
44  * 82566DM-2 Gigabit Network Connection
45  * 82566DM Gigabit Network Connection
46  * 82566MC Gigabit Network Connection
47  * 82566MM Gigabit Network Connection
48  * 82567LM Gigabit Network Connection
49  * 82567LF Gigabit Network Connection
50  * 82567V Gigabit Network Connection
51  * 82567LM-2 Gigabit Network Connection
52  * 82567LF-2 Gigabit Network Connection
53  * 82567V-2 Gigabit Network Connection
54  * 82567LF-3 Gigabit Network Connection
55  * 82567LM-3 Gigabit Network Connection
56  * 82567LM-4 Gigabit Network Connection
57  * 82577LM Gigabit Network Connection
58  * 82577LC Gigabit Network Connection
59  * 82578DM Gigabit Network Connection
60  * 82578DC Gigabit Network Connection
61  * 82579LM Gigabit Network Connection
62  * 82579V Gigabit Network Connection
63  */
64 
65 #include "e1000_api.h"
66 
67 static s32  e1000_init_phy_params_ich8lan(struct e1000_hw *hw);
68 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw);
69 static s32  e1000_init_nvm_params_ich8lan(struct e1000_hw *hw);
70 static s32  e1000_init_mac_params_ich8lan(struct e1000_hw *hw);
71 static s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
72 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
73 static s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
74 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
75 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
76 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
77 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
78 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
79                                               u8 *mc_addr_list,
80                                               u32 mc_addr_count);
81 static s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
82 static s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
83 static s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
84 static s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
85                                             bool active);
86 static s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
87                                             bool active);
88 static s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
89                                    u16 words, u16 *data);
90 static s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
91                                     u16 words, u16 *data);
92 static s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
93 static s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
94 static s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
95                                             u16 *data);
96 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
97 static s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
98 static s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
99 static s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
100 static s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
101 static s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
102 static s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
103                                            u16 *speed, u16 *duplex);
104 static s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
105 static s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
106 static s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
107 static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
108 static s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
109 static s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
110 static s32  e1000_led_on_pchlan(struct e1000_hw *hw);
111 static s32  e1000_led_off_pchlan(struct e1000_hw *hw);
112 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
113 static s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
114 static s32  e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout);
115 static s32  e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw);
116 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
117 static s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
118 static s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
119                                           u32 offset, u8 *data);
120 static s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
121                                           u8 size, u16 *data);
122 static s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
123                                           u32 offset, u16 *data);
124 static s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
125                                                  u32 offset, u8 byte);
126 static s32  e1000_write_flash_byte_ich8lan(struct e1000_hw *hw,
127                                            u32 offset, u8 data);
128 static s32  e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
129                                            u8 size, u16 data);
130 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
131 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
132 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
133 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
134 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
135 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
136 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
137 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
138 
139 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
140 /* Offset 04h HSFSTS */
141 union ich8_hws_flash_status {
142 	struct ich8_hsfsts {
143 		u16 flcdone    :1; /* bit 0 Flash Cycle Done */
144 		u16 flcerr     :1; /* bit 1 Flash Cycle Error */
145 		u16 dael       :1; /* bit 2 Direct Access error Log */
146 		u16 berasesz   :2; /* bit 4:3 Sector Erase Size */
147 		u16 flcinprog  :1; /* bit 5 flash cycle in Progress */
148 		u16 reserved1  :2; /* bit 7:6 Reserved */
149 		u16 reserved2  :6; /* bit 13:8 Reserved */
150 		u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
151 		u16 flockdn    :1; /* bit 15 Flash Config Lock-Down */
152 	} hsf_status;
153 	u16 regval;
154 };
155 
156 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
157 /* Offset 06h FLCTL */
158 union ich8_hws_flash_ctrl {
159 	struct ich8_hsflctl {
160 		u16 flcgo      :1;   /* 0 Flash Cycle Go */
161 		u16 flcycle    :2;   /* 2:1 Flash Cycle */
162 		u16 reserved   :5;   /* 7:3 Reserved  */
163 		u16 fldbcount  :2;   /* 9:8 Flash Data Byte Count */
164 		u16 flockdn    :6;   /* 15:10 Reserved */
165 	} hsf_ctrl;
166 	u16 regval;
167 };
168 
169 /* ICH Flash Region Access Permissions */
170 union ich8_hws_flash_regacc {
171 	struct ich8_flracc {
172 		u32 grra      :8; /* 0:7 GbE region Read Access */
173 		u32 grwa      :8; /* 8:15 GbE region Write Access */
174 		u32 gmrag     :8; /* 23:16 GbE Master Read Access Grant */
175 		u32 gmwag     :8; /* 31:24 GbE Master Write Access Grant */
176 	} hsf_flregacc;
177 	u16 regval;
178 };
179 
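/**
 *  e1000_toggle_lanphypc_value_ich8lan - toggle the LANPHYPC pin value
 *  @hw: pointer to the HW structure
 *
 *  Toggles the LANPHYPC Value bit (with the override asserted) to force
 *  the MAC-PHY interconnect from SMBus to PCIe mode; see the Sx->S0
 *  handling in e1000_init_phy_params_pchlan().
 **/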
180 static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw)
181 {
182 	u32 ctrl;
183 
184 	DEBUGFUNC("e1000_toggle_lanphypc_value_ich8lan");
185 
186 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
187 	ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
188 	ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
189 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
190 	usec_delay(10);
191 	ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
192 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
193 }
194 
195 /**
196  *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
197  *  @hw: pointer to the HW structure
198  *
199  *  Initialize family-specific PHY parameters and function pointers.
200  **/
201 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
202 {
203 	struct e1000_phy_info *phy = &hw->phy;
204 	u32 fwsm;
205 	s32 ret_val = E1000_SUCCESS;
206 
207 	DEBUGFUNC("e1000_init_phy_params_pchlan");
208 
209 	phy->addr                     = 1;
210 	phy->reset_delay_us           = 100;
211 
212 	phy->ops.acquire              = e1000_acquire_swflag_ich8lan;
213 	phy->ops.check_reset_block    = e1000_check_reset_block_ich8lan;
214 	phy->ops.get_cfg_done         = e1000_get_cfg_done_ich8lan;
215 	phy->ops.set_page             = e1000_set_page_igp;
216 	phy->ops.read_reg             = e1000_read_phy_reg_hv;
217 	phy->ops.read_reg_locked      = e1000_read_phy_reg_hv_locked;
218 	phy->ops.read_reg_page        = e1000_read_phy_reg_page_hv;
219 	phy->ops.release              = e1000_release_swflag_ich8lan;
220 	phy->ops.reset                = e1000_phy_hw_reset_ich8lan;
221 	phy->ops.set_d0_lplu_state    = e1000_set_lplu_state_pchlan;
222 	phy->ops.set_d3_lplu_state    = e1000_set_lplu_state_pchlan;
223 	phy->ops.write_reg            = e1000_write_phy_reg_hv;
224 	phy->ops.write_reg_locked     = e1000_write_phy_reg_hv_locked;
225 	phy->ops.write_reg_page       = e1000_write_phy_reg_page_hv;
226 	phy->ops.power_up             = e1000_power_up_phy_copper;
227 	phy->ops.power_down           = e1000_power_down_phy_copper_ich8lan;
228 	phy->autoneg_mask             = AUTONEG_ADVERTISE_SPEED_DEFAULT;
229 
230 	/*
231 	 * The MAC-PHY interconnect may still be in SMBus mode
232 	 * after Sx->S0.  If the manageability engine (ME) is
233 	 * disabled, then toggle the LANPHYPC Value bit to force
234 	 * the interconnect to PCIe mode.
235 	 */
236 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
237 	if (!(fwsm & E1000_ICH_FWSM_FW_VALID) &&
238 	    !hw->phy.ops.check_reset_block(hw)) {
239 		e1000_toggle_lanphypc_value_ich8lan(hw);
240 		msec_delay(50);
241 
242 		/*
243 		 * Gate automatic PHY configuration by hardware on
244 		 * non-managed 82579
245 		 */
246 		if (hw->mac.type == e1000_pch2lan)
247 			e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
248 	}
249 
250 	/*
251 	 * Reset the PHY before any access to it.  Doing so ensures that
252 	 * the PHY is in a known good state before we read/write PHY registers.
253 	 * The generic reset is sufficient here, because we haven't determined
254 	 * the PHY type yet.
255 	 */
256 	ret_val = e1000_phy_hw_reset_generic(hw);
257 	if (ret_val)
258 		goto out;
259 
260 	/* Ungate automatic PHY configuration on non-managed 82579 */
261 	if ((hw->mac.type == e1000_pch2lan) &&
262 	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
263 		msec_delay(10);
264 		e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
265 	}
266 
267 	phy->id = e1000_phy_unknown;
268 	switch (hw->mac.type) {
269 	default:
270 		ret_val = e1000_get_phy_id(hw);
271 		if (ret_val)
272 			goto out;
273 		if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
274 			break;
275 		/* fall-through */
276 	case e1000_pch2lan:
277 		/*
278 		 * In case the PHY needs to be in mdio slow mode,
279 		 * set slow mode and try to get the PHY id again.
280 		 */
281 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
282 		if (ret_val)
283 			goto out;
284 		ret_val = e1000_get_phy_id(hw);
285 		if (ret_val)
286 			goto out;
287 		break;
288 	}
289 	phy->type = e1000_get_phy_type_from_id(phy->id);
290 
291 	switch (phy->type) {
292 	case e1000_phy_82577:
293 	case e1000_phy_82579:
294 		phy->ops.check_polarity = e1000_check_polarity_82577;
295 		phy->ops.force_speed_duplex =
296 			e1000_phy_force_speed_duplex_82577;
297 		phy->ops.get_cable_length = e1000_get_cable_length_82577;
298 		phy->ops.get_info = e1000_get_phy_info_82577;
299 		phy->ops.commit = e1000_phy_sw_reset_generic;
300 		break;
301 	case e1000_phy_82578:
302 		phy->ops.check_polarity = e1000_check_polarity_m88;
303 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
304 		phy->ops.get_cable_length = e1000_get_cable_length_m88;
305 		phy->ops.get_info = e1000_get_phy_info_m88;
306 		break;
307 	default:
308 		ret_val = -E1000_ERR_PHY;
309 		break;
310 	}
311 
312 out:
313 	return ret_val;
314 }
315 
316 /**
317  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
318  *  @hw: pointer to the HW structure
319  *
320  *  Initialize family-specific PHY parameters and function pointers.
321  **/
322 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
323 {
324 	struct e1000_phy_info *phy = &hw->phy;
325 	s32 ret_val = E1000_SUCCESS;
326 	u16 i = 0;
327 
328 	DEBUGFUNC("e1000_init_phy_params_ich8lan");
329 
330 	phy->addr                     = 1;
331 	phy->reset_delay_us           = 100;
332 
333 	phy->ops.acquire              = e1000_acquire_swflag_ich8lan;
334 	phy->ops.check_reset_block    = e1000_check_reset_block_ich8lan;
335 	phy->ops.get_cable_length     = e1000_get_cable_length_igp_2;
336 	phy->ops.get_cfg_done         = e1000_get_cfg_done_ich8lan;
337 	phy->ops.read_reg             = e1000_read_phy_reg_igp;
338 	phy->ops.release              = e1000_release_swflag_ich8lan;
339 	phy->ops.reset                = e1000_phy_hw_reset_ich8lan;
340 	phy->ops.set_d0_lplu_state    = e1000_set_d0_lplu_state_ich8lan;
341 	phy->ops.set_d3_lplu_state    = e1000_set_d3_lplu_state_ich8lan;
342 	phy->ops.write_reg            = e1000_write_phy_reg_igp;
343 	phy->ops.power_up             = e1000_power_up_phy_copper;
344 	phy->ops.power_down           = e1000_power_down_phy_copper_ich8lan;
345 
346 	/*
347 	 * We may need to do this twice - once for IGP and if that fails,
348 	 * we'll set BM func pointers and try again
349 	 */
350 	ret_val = e1000_determine_phy_address(hw);
351 	if (ret_val) {
352 		phy->ops.write_reg = e1000_write_phy_reg_bm;
353 		phy->ops.read_reg  = e1000_read_phy_reg_bm;
354 		ret_val = e1000_determine_phy_address(hw);
355 		if (ret_val) {
356 			DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
357 			goto out;
358 		}
359 	}
360 
361 	phy->id = 0;
362 	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
363 	       (i++ < 100)) {
364 		msec_delay(1);
365 		ret_val = e1000_get_phy_id(hw);
366 		if (ret_val)
367 			goto out;
368 	}
369 
370 	/* Verify phy id */
371 	switch (phy->id) {
372 	case IGP03E1000_E_PHY_ID:
373 		phy->type = e1000_phy_igp_3;
374 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
375 		phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
376 		phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
377 		phy->ops.get_info = e1000_get_phy_info_igp;
378 		phy->ops.check_polarity = e1000_check_polarity_igp;
379 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
380 		break;
381 	case IFE_E_PHY_ID:
382 	case IFE_PLUS_E_PHY_ID:
383 	case IFE_C_E_PHY_ID:
384 		phy->type = e1000_phy_ife;
385 		phy->autoneg_mask = E1000_ALL_NOT_GIG;
386 		phy->ops.get_info = e1000_get_phy_info_ife;
387 		phy->ops.check_polarity = e1000_check_polarity_ife;
388 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
389 		break;
390 	case BME1000_E_PHY_ID:
391 		phy->type = e1000_phy_bm;
392 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
393 		phy->ops.read_reg = e1000_read_phy_reg_bm;
394 		phy->ops.write_reg = e1000_write_phy_reg_bm;
395 		phy->ops.commit = e1000_phy_sw_reset_generic;
396 		phy->ops.get_info = e1000_get_phy_info_m88;
397 		phy->ops.check_polarity = e1000_check_polarity_m88;
398 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
399 		break;
400 	default:
401 		ret_val = -E1000_ERR_PHY;
402 		goto out;
403 	}
404 
405 out:
406 	return ret_val;
407 }
408 
409 /**
410  *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
411  *  @hw: pointer to the HW structure
412  *
413  *  Initialize family-specific NVM parameters and function
414  *  pointers.
415  **/
416 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
417 {
418 	struct e1000_nvm_info *nvm = &hw->nvm;
419 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
420 	u32 gfpreg, sector_base_addr, sector_end_addr;
421 	s32 ret_val = E1000_SUCCESS;
422 	u16 i;
423 
424 	DEBUGFUNC("e1000_init_nvm_params_ich8lan");
425 
426 	/* Can't read flash registers if the register set isn't mapped. */
427 	if (!hw->flash_address) {
428 		DEBUGOUT("ERROR: Flash registers not mapped\n");
429 		ret_val = -E1000_ERR_CONFIG;
430 		goto out;
431 	}
432 
433 	nvm->type = e1000_nvm_flash_sw;
434 
435 	gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
436 
437 	/*
438 	 * sector_X_addr is a "sector"-aligned address (4096 bytes)
439 	 * Add 1 to sector_end_addr since this sector is included in
440 	 * the overall size.
441 	 */
442 	sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
443 	sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
444 
445 	/* flash_base_addr is byte-aligned */
446 	nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
447 
448 	/*
449 	 * find total size of the NVM, then cut in half since the total
450 	 * size represents two separate NVM banks.
451 	 */
452 	nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
453 	                          << FLASH_SECTOR_ADDR_SHIFT;
454 	nvm->flash_bank_size /= 2;
455 	/* Adjust to word count */
456 	nvm->flash_bank_size /= sizeof(u16);
457 
458 	nvm->word_size = E1000_SHADOW_RAM_WORDS;
459 
460 	/* Clear shadow ram */
461 	for (i = 0; i < nvm->word_size; i++) {
462 		dev_spec->shadow_ram[i].modified = FALSE;
463 		dev_spec->shadow_ram[i].value    = 0xFFFF;
464 	}
465 
466 	/* Function Pointers */
467 	nvm->ops.acquire       = e1000_acquire_nvm_ich8lan;
468 	nvm->ops.release       = e1000_release_nvm_ich8lan;
469 	nvm->ops.read          = e1000_read_nvm_ich8lan;
470 	nvm->ops.update        = e1000_update_nvm_checksum_ich8lan;
471 	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
472 	nvm->ops.validate      = e1000_validate_nvm_checksum_ich8lan;
473 	nvm->ops.write         = e1000_write_nvm_ich8lan;
474 
475 out:
476 	return ret_val;
477 }
478 
479 /**
480  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
481  *  @hw: pointer to the HW structure
482  *
483  *  Initialize family-specific MAC parameters and function
484  *  pointers.
485  **/
486 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
487 {
488 	struct e1000_mac_info *mac = &hw->mac;
489 
490 	DEBUGFUNC("e1000_init_mac_params_ich8lan");
491 
492 	/* Set media type function pointer */
493 	hw->phy.media_type = e1000_media_type_copper;
494 
495 	/* Set mta register count */
496 	mac->mta_reg_count = 32;
497 	/* Set rar entry count */
498 	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
499 	if (mac->type == e1000_ich8lan)
500 		mac->rar_entry_count--;
501 	/* Set if part includes ASF firmware */
502 	mac->asf_firmware_present = TRUE;
503 	/* FWSM register */
504 	mac->has_fwsm = TRUE;
505 	/* ARC subsystem not supported */
506 	mac->arc_subsystem_valid = FALSE;
507 	/* Adaptive IFS supported */
508 	mac->adaptive_ifs = TRUE;
509 
510 	/* Function pointers */
511 
512 	/* bus type/speed/width */
513 	mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
514 	/* function id */
515 	mac->ops.set_lan_id = e1000_set_lan_id_single_port;
516 	/* reset */
517 	mac->ops.reset_hw = e1000_reset_hw_ich8lan;
518 	/* hw initialization */
519 	mac->ops.init_hw = e1000_init_hw_ich8lan;
520 	/* link setup */
521 	mac->ops.setup_link = e1000_setup_link_ich8lan;
522 	/* physical interface setup */
523 	mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
524 	/* check for link */
525 	mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
526 	/* link info */
527 	mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
528 	/* multicast address update */
529 	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
530 	/* clear hardware counters */
531 	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
532 
533 	/* LED operations */
534 	switch (mac->type) {
535 	case e1000_ich8lan:
536 	case e1000_ich9lan:
537 	case e1000_ich10lan:
538 		/* check management mode */
539 		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
540 		/* ID LED init */
541 		mac->ops.id_led_init = e1000_id_led_init_generic;
542 		/* blink LED */
543 		mac->ops.blink_led = e1000_blink_led_generic;
544 		/* setup LED */
545 		mac->ops.setup_led = e1000_setup_led_generic;
546 		/* cleanup LED */
547 		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
548 		/* turn on/off LED */
549 		mac->ops.led_on = e1000_led_on_ich8lan;
550 		mac->ops.led_off = e1000_led_off_ich8lan;
551 		break;
552 	case e1000_pch2lan:
553 		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
554 		mac->ops.rar_set = e1000_rar_set_pch2lan;
555 		/* multicast address update for pch2 */
556 		mac->ops.update_mc_addr_list =
557 			e1000_update_mc_addr_list_pch2lan;
558 		/* fall-through */
559 	case e1000_pchlan:
560 		/* check management mode */
561 		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
562 		/* ID LED init */
563 		mac->ops.id_led_init = e1000_id_led_init_pchlan;
564 		/* setup LED */
565 		mac->ops.setup_led = e1000_setup_led_pchlan;
566 		/* cleanup LED */
567 		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
568 		/* turn on/off LED */
569 		mac->ops.led_on = e1000_led_on_pchlan;
570 		mac->ops.led_off = e1000_led_off_pchlan;
571 		break;
572 	default:
573 		break;
574 	}
575 
576 	/* Enable PCS Lock-loss workaround for ICH8 */
577 	if (mac->type == e1000_ich8lan)
578 		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
579 
580 	/* Gate automatic PHY configuration by hardware on managed 82579 */
581 	if ((mac->type == e1000_pch2lan) &&
582 	    (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
583 		e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
584 
585 	return E1000_SUCCESS;
586 }
587 
588 /**
589  *  e1000_set_eee_pchlan - Enable/disable EEE support
590  *  @hw: pointer to the HW structure
591  *
592  *  Enable/disable EEE based on setting in dev_spec structure.  The bits in
593  *  the LPI Control register will remain set only if/when link is up.
594  **/
595 static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
596 {
597 	s32 ret_val = E1000_SUCCESS;
598 	u16 phy_reg;
599 
600 	DEBUGFUNC("e1000_set_eee_pchlan");
601 
602 	if (hw->phy.type != e1000_phy_82579)
603 		goto out;
604 
605 	ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
606 	if (ret_val)
607 		goto out;
608 
609 	if (hw->dev_spec.ich8lan.eee_disable)
610 		phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
611 	else
612 		phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
613 
614 	ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
615 out:
616 	return ret_val;
617 }
618 
619 /**
620  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
621  *  @hw: pointer to the HW structure
622  *
623  *  Checks to see if the link status of the hardware has changed.  If a
624  *  change in link status has been detected, then we read the PHY registers
625  *  to get the current speed/duplex if link exists.
626  **/
627 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
628 {
629 	struct e1000_mac_info *mac = &hw->mac;
630 	s32 ret_val;
631 	bool link;
632 
633 	DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
634 
635 	/*
636 	 * We only want to go out to the PHY registers to see if Auto-Neg
637 	 * has completed and/or if our link status has changed.  The
638 	 * get_link_status flag is set upon receiving a Link Status
639 	 * Change or Rx Sequence Error interrupt.
640 	 */
641 	if (!mac->get_link_status) {
642 		ret_val = E1000_SUCCESS;
643 		goto out;
644 	}
645 
646 	/*
647 	 * First we want to see if the MII Status Register reports
648 	 * link.  If so, then we want to get the current speed/duplex
649 	 * of the PHY.
650 	 */
651 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
652 	if (ret_val)
653 		goto out;
654 
655 	if (hw->mac.type == e1000_pchlan) {
656 		ret_val = e1000_k1_gig_workaround_hv(hw, link);
657 		if (ret_val)
658 			goto out;
659 	}
660 
661 	if (!link)
662 		goto out; /* No link detected */
663 
664 	mac->get_link_status = FALSE;
665 
666 	switch (hw->mac.type) {
667 	case e1000_pch2lan:
668 		ret_val = e1000_k1_workaround_lv(hw);
669 		if (ret_val)
670 			goto out;
671 		/* fall-thru */
672 	case e1000_pchlan:
673 		if (hw->phy.type == e1000_phy_82578) {
674 			ret_val = e1000_link_stall_workaround_hv(hw);
675 			if (ret_val)
676 				goto out;
677 		}
678 
679 		/*
680 		 * Workaround for PCHx parts in half-duplex:
681 		 * Set the number of preambles removed from the packet
682 		 * when it is passed from the PHY to the MAC to prevent
683 		 * the MAC from misinterpreting the packet type.
684 		 */
685 		if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
686 		    E1000_STATUS_FD) {
687 			u16 preambles;
688 
689 			hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA,
690 			                     &preambles);
691 			preambles &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
692 			preambles |= (4 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
693 			hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
694 			                      preambles);
695 		}
696 		break;
697 	default:
698 		break;
699 	}
700 
701 	/*
702 	 * Check if there was DownShift; it must be checked
703 	 * immediately after link-up.
704 	 */
705 	e1000_check_downshift_generic(hw);
706 
707 	/* Enable/Disable EEE after link up */
708 	ret_val = e1000_set_eee_pchlan(hw);
709 	if (ret_val)
710 		goto out;
711 
712 	/*
713 	 * If we are forcing speed/duplex, then we simply return since
714 	 * we have already determined whether we have link or not.
715 	 */
716 	if (!mac->autoneg) {
717 		ret_val = -E1000_ERR_CONFIG;
718 		goto out;
719 	}
720 
721 	/*
722 	 * Auto-Neg is enabled.  Auto Speed Detection takes care
723 	 * of MAC speed/duplex configuration.  So we only need to
724 	 * configure Collision Distance in the MAC.
725 	 */
726 	e1000_config_collision_dist_generic(hw);
727 
728 	/*
729 	 * Configure Flow Control now that Auto-Neg has completed.
730 	 * First, we need to restore the desired flow control
731 	 * settings because we may have had to re-autoneg with a
732 	 * different link partner.
733 	 */
734 	ret_val = e1000_config_fc_after_link_up_generic(hw);
735 	if (ret_val)
736 		DEBUGOUT("Error configuring flow control\n");
737 
738 out:
739 	return ret_val;
740 }
741 
742 /**
743  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
744  *  @hw: pointer to the HW structure
745  *
746  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
747  **/
748 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
749 {
750 	DEBUGFUNC("e1000_init_function_pointers_ich8lan");
751 
752 	hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
753 	hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
754 	switch (hw->mac.type) {
755 	case e1000_ich8lan:
756 	case e1000_ich9lan:
757 	case e1000_ich10lan:
758 		hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
759 		break;
760 	case e1000_pchlan:
761 	case e1000_pch2lan:
762 		hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
763 		break;
764 	default:
765 		break;
766 	}
767 }
768 
769 /**
770  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
771  *  @hw: pointer to the HW structure
772  *
773  *  Acquires the mutex for performing NVM operations.
774  **/
775 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
776 {
777 	DEBUGFUNC("e1000_acquire_nvm_ich8lan");
778 
779 	return E1000_SUCCESS;
780 }
781 
782 /**
783  *  e1000_release_nvm_ich8lan - Release NVM mutex
784  *  @hw: pointer to the HW structure
785  *
786  *  Releases the mutex used while performing NVM operations.
787  **/
788 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
789 {
790 	DEBUGFUNC("e1000_release_nvm_ich8lan");
791 
792 	return;
793 }
794 
795 /**
796  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
797  *  @hw: pointer to the HW structure
798  *
799  *  Acquires the software control flag for performing PHY and select
800  *  MAC CSR accesses.
801  **/
802 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
803 {
804 	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
805 	s32 ret_val = E1000_SUCCESS;
806 
807 	DEBUGFUNC("e1000_acquire_swflag_ich8lan");
808 
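	/* Wait for any current owner (SW/FW/HW) to release the flag */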
809 	while (timeout) {
810 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
811 		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
812 			break;
813 
814 		msec_delay_irq(1);
815 		timeout--;
816 	}
817 
818 	if (!timeout) {
819 		DEBUGOUT("SW/FW/HW has locked the resource for too long.\n");
820 		ret_val = -E1000_ERR_CONFIG;
821 		goto out;
822 	}
823 
824 	timeout = SW_FLAG_TIMEOUT;
825 
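	/*
	 * Set the flag and verify it latches; if it does not, another
	 * agent still owns the semaphore.
	 */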
826 	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
827 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
828 
829 	while (timeout) {
830 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
831 		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
832 			break;
833 
834 		msec_delay_irq(1);
835 		timeout--;
836 	}
837 
838 	if (!timeout) {
839 		DEBUGOUT("Failed to acquire the semaphore.\n");
840 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
841 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
842 		ret_val = -E1000_ERR_CONFIG;
843 		goto out;
844 	}
845 
846 out:
847 
848 	return ret_val;
849 }
850 
851 /**
852  *  e1000_release_swflag_ich8lan - Release software control flag
853  *  @hw: pointer to the HW structure
854  *
855  *  Releases the software control flag for performing PHY and select
856  *  MAC CSR accesses.
857  **/
858 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
859 {
860 	u32 extcnf_ctrl;
861 
862 	DEBUGFUNC("e1000_release_swflag_ich8lan");
863 
864 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
865 
866 	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
867 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
868 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
869 	} else {
870 		DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
871 	}
872 
873 	return;
874 }
875 
876 /**
877  *  e1000_check_mng_mode_ich8lan - Checks management mode
878  *  @hw: pointer to the HW structure
879  *
880  *  This checks if the adapter has any manageability enabled.
881  *  This is a function pointer entry point only called by read/write
882  *  routines for the PHY and NVM parts.
883  **/
884 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
885 {
886 	u32 fwsm;
887 
888 	DEBUGFUNC("e1000_check_mng_mode_ich8lan");
889 
890 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
891 
892 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
893 	       ((fwsm & E1000_FWSM_MODE_MASK) ==
894 		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
895 }
896 
897 /**
898  *  e1000_check_mng_mode_pchlan - Checks management mode
899  *  @hw: pointer to the HW structure
900  *
901  *  This checks if the adapter has iAMT enabled.
902  *  This is a function pointer entry point only called by read/write
903  *  routines for the PHY and NVM parts.
904  **/
905 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
906 {
907 	u32 fwsm;
908 
909 	DEBUGFUNC("e1000_check_mng_mode_pchlan");
910 
911 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
912 
913 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
914 	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
915 }
916 
917 /**
918  *  e1000_rar_set_pch2lan - Set receive address register
919  *  @hw: pointer to the HW structure
920  *  @addr: pointer to the receive address
921  *  @index: receive address array register
922  *
923  *  Sets the receive address array register at index to the address passed
924  *  in by addr.  For 82579, RAR[0] is the base address register that is to
925  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
926  *  Use SHRA[0-3] in place of those reserved for ME.
927  **/
928 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
929 {
930 	u32 rar_low, rar_high;
931 
932 	DEBUGFUNC("e1000_rar_set_pch2lan");
933 
934 	/*
935 	 * HW expects these in little endian so we reverse the byte order
936 	 * from network order (big endian) to little endian
937 	 */
938 	rar_low = ((u32) addr[0] |
939 	           ((u32) addr[1] << 8) |
940 	           ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
941 
942 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
943 
944 	/* If MAC address zero, no need to set the AV bit */
945 	if (rar_low || rar_high)
946 		rar_high |= E1000_RAH_AV;
947 
948 	if (index == 0) {
949 		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
950 		E1000_WRITE_FLUSH(hw);
951 		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
952 		E1000_WRITE_FLUSH(hw);
953 		return;
954 	}
955 
956 	if (index < hw->mac.rar_entry_count) {
957 		E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
958 		E1000_WRITE_FLUSH(hw);
959 		E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
960 		E1000_WRITE_FLUSH(hw);
961 
962 		/* verify the register updates */
963 		if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
964 		    (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
965 			return;
966 
967 		DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
968 			 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
969 	}
970 
971 	DEBUGOUT1("Failed to write receive address at index %d\n", index);
972 }
973 
974 /**
975  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
976  *  @hw: pointer to the HW structure
977  *  @mc_addr_list: array of multicast addresses to program
978  *  @mc_addr_count: number of multicast addresses to program
979  *
980  *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
981  *  The caller must have a packed mc_addr_list of multicast addresses.
982  **/
983 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
984                                               u8 *mc_addr_list,
985                                               u32 mc_addr_count)
986 {
987 	u16 phy_reg = 0;
988 	int i;
989 	s32 ret_val;
990 
991 	DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
992 
993 	e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
994 
995 	ret_val = hw->phy.ops.acquire(hw);
996 	if (ret_val)
997 		return;
998 
999 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1000 	if (ret_val)
1001 		goto release;
1002 
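	/*
	 * Mirror the MAC's MTA shadow into the PHY's copy, two 16-bit
	 * writes per 32-bit MTA entry.
	 */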
1003 	for (i = 0; i < hw->mac.mta_reg_count; i++) {
1004 		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
1005 		                           (u16)(hw->mac.mta_shadow[i] &
1006 		                                 0xFFFF));
1007 		hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
1008 		                           (u16)((hw->mac.mta_shadow[i] >> 16) &
1009 		                                 0xFFFF));
1010 	}
1011 
1012 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1013 
1014 release:
1015 	hw->phy.ops.release(hw);
1016 }
1017 
1018 /**
1019  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1020  *  @hw: pointer to the HW structure
1021  *
1022  *  Checks if firmware is blocking the reset of the PHY.
1023  *  This is a function pointer entry point only called by
1024  *  reset routines.
1025  **/
1026 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1027 {
1028 	u32 fwsm;
1029 
1030 	DEBUGFUNC("e1000_check_reset_block_ich8lan");
1031 
1032 	if (hw->phy.reset_disable)
1033 		return E1000_BLK_PHY_RESET;
1034 
1035 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
1036 
1037 	return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
1038 	                                        : E1000_BLK_PHY_RESET;
1039 }
1040 
1041 /**
1042  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
1043  *  @hw: pointer to the HW structure
1044  *
1045  *  Assumes semaphore already acquired.
1046  *
1047  **/
1048 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
1049 {
1050 	u16 phy_data;
1051 	u32 strap = E1000_READ_REG(hw, E1000_STRAP);
1052 	s32 ret_val = E1000_SUCCESS;
1053 
1054 	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
1055 
1056 	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
1057 	if (ret_val)
1058 		goto out;
1059 
1060 	phy_data &= ~HV_SMB_ADDR_MASK;
1061 	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1062 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1063 	ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
1064 
1065 out:
1066 	return ret_val;
1067 }
1068 
1069 /**
1070  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
1071  *  @hw:   pointer to the HW structure
1072  *
1073  *  SW should configure the LCD from the NVM extended configuration region
1074  *  as a workaround for certain parts.
1075  **/
1076 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1077 {
1078 	struct e1000_phy_info *phy = &hw->phy;
1079 	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
1080 	s32 ret_val = E1000_SUCCESS;
1081 	u16 word_addr, reg_data, reg_addr, phy_page = 0;
1082 
1083 	DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
1084 
1085 	/*
1086 	 * Initialize the PHY from the NVM on ICH platforms.  This
1087 	 * is needed due to an issue where the NVM configuration is
1088 	 * not properly autoloaded after power transitions.
1089 	 * Therefore, after each PHY reset, we will load the
1090 	 * configuration data out of the NVM manually.
1091 	 */
1092 	switch (hw->mac.type) {
1093 	case e1000_ich8lan:
1094 		if (phy->type != e1000_phy_igp_3)
1095 			return ret_val;
1096 
1097 		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
1098 		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
1099 			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
1100 			break;
1101 		}
1102 		/* Fall-thru */
1103 	case e1000_pchlan:
1104 	case e1000_pch2lan:
1105 		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
1106 		break;
1107 	default:
1108 		return ret_val;
1109 	}
1110 
1111 	ret_val = hw->phy.ops.acquire(hw);
1112 	if (ret_val)
1113 		return ret_val;
1114 
1115 	data = E1000_READ_REG(hw, E1000_FEXTNVM);
1116 	if (!(data & sw_cfg_mask))
1117 		goto out;
1118 
1119 	/*
1120 	 * Make sure HW does not configure LCD from PHY
1121 	 * extended configuration before SW configuration
1122 	 */
1123 	data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1124 	if (!(hw->mac.type == e1000_pch2lan)) {
1125 		if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
1126 			goto out;
1127 	}
1128 
1129 	cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
1130 	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
1131 	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
1132 	if (!cnf_size)
1133 		goto out;
1134 
1135 	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
1136 	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
1137 
1138 	if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
1139 	    (hw->mac.type == e1000_pchlan)) ||
1140 	     (hw->mac.type == e1000_pch2lan)) {
1141 		/*
1142 		 * HW configures the SMBus address and LEDs when the
1143 		 * OEM and LCD Write Enable bits are set in the NVM.
1144 		 * When both NVM bits are cleared, SW will configure
1145 		 * them instead.
1146 		 */
1147 		ret_val = e1000_write_smbus_addr(hw);
1148 		if (ret_val)
1149 			goto out;
1150 
1151 		data = E1000_READ_REG(hw, E1000_LEDCTL);
1152 		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
1153 							(u16)data);
1154 		if (ret_val)
1155 			goto out;
1156 	}
1157 
1158 	/* Configure LCD from extended configuration region. */
1159 
1160 	/* cnf_base_addr is in DWORD */
1161 	word_addr = (u16)(cnf_base_addr << 1);
1162 
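	/*
	 * Each record in the extended configuration region is a word pair:
	 * the register data followed by the register address.
	 */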
1163 	for (i = 0; i < cnf_size; i++) {
1164 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
1165 					   &reg_data);
1166 		if (ret_val)
1167 			goto out;
1168 
1169 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
1170 					   1, &reg_addr);
1171 		if (ret_val)
1172 			goto out;
1173 
1174 		/* Save off the PHY page for future writes. */
1175 		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
1176 			phy_page = reg_data;
1177 			continue;
1178 		}
1179 
1180 		reg_addr &= PHY_REG_MASK;
1181 		reg_addr |= phy_page;
1182 
1183 		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
1184 						    reg_data);
1185 		if (ret_val)
1186 			goto out;
1187 	}
1188 
1189 out:
1190 	hw->phy.ops.release(hw);
1191 	return ret_val;
1192 }
1193 
1194 /**
1195  *  e1000_k1_gig_workaround_hv - K1 Si workaround
1196  *  @hw:   pointer to the HW structure
1197  *  @link: link up bool flag
1198  *
1199  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
1200  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
1201  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
1202  *  in the NVM.
1203  **/
1204 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1205 {
1206 	s32 ret_val = E1000_SUCCESS;
1207 	u16 status_reg = 0;
1208 	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
1209 
1210 	DEBUGFUNC("e1000_k1_gig_workaround_hv");
1211 
1212 	if (hw->mac.type != e1000_pchlan)
1213 		goto out;
1214 
1215 	/* Wrap the whole flow with the sw flag */
1216 	ret_val = hw->phy.ops.acquire(hw);
1217 	if (ret_val)
1218 		goto out;
1219 
1220 	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
1221 	if (link) {
1222 		if (hw->phy.type == e1000_phy_82578) {
1223 			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
1224 			                                      &status_reg);
1225 			if (ret_val)
1226 				goto release;
1227 
1228 			status_reg &= BM_CS_STATUS_LINK_UP |
1229 			              BM_CS_STATUS_RESOLVED |
1230 			              BM_CS_STATUS_SPEED_MASK;
1231 
1232 			if (status_reg == (BM_CS_STATUS_LINK_UP |
1233 			                   BM_CS_STATUS_RESOLVED |
1234 			                   BM_CS_STATUS_SPEED_1000))
1235 				k1_enable = FALSE;
1236 		}
1237 
1238 		if (hw->phy.type == e1000_phy_82577) {
1239 			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
1240 			                                      &status_reg);
1241 			if (ret_val)
1242 				goto release;
1243 
1244 			status_reg &= HV_M_STATUS_LINK_UP |
1245 			              HV_M_STATUS_AUTONEG_COMPLETE |
1246 			              HV_M_STATUS_SPEED_MASK;
1247 
1248 			if (status_reg == (HV_M_STATUS_LINK_UP |
1249 			                   HV_M_STATUS_AUTONEG_COMPLETE |
1250 			                   HV_M_STATUS_SPEED_1000))
1251 				k1_enable = FALSE;
1252 		}
1253 
1254 		/* Link stall fix for link up */
1255 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1256 		                                       0x0100);
1257 		if (ret_val)
1258 			goto release;
1259 
1260 	} else {
1261 		/* Link stall fix for link down */
1262 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1263 		                                       0x4100);
1264 		if (ret_val)
1265 			goto release;
1266 	}
1267 
1268 	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
1269 
1270 release:
1271 	hw->phy.ops.release(hw);
1272 out:
1273 	return ret_val;
1274 }
1275 
1276 /**
1277  *  e1000_configure_k1_ich8lan - Configure K1 power state
1278  *  @hw: pointer to the HW structure
1279  *  @k1_enable: K1 state to configure
1280  *
1281  *  Configure the K1 power state based on the provided parameter.
1282  *  Assumes semaphore already acquired.
1283  *
1284  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1285  **/
1286 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1287 {
1288 	s32 ret_val = E1000_SUCCESS;
1289 	u32 ctrl_reg = 0;
1290 	u32 ctrl_ext = 0;
1291 	u32 reg = 0;
1292 	u16 kmrn_reg = 0;
1293 
1294 	DEBUGFUNC("e1000_configure_k1_ich8lan");
1295 
1296 	ret_val = e1000_read_kmrn_reg_locked(hw,
1297 	                                     E1000_KMRNCTRLSTA_K1_CONFIG,
1298 	                                     &kmrn_reg);
1299 	if (ret_val)
1300 		goto out;
1301 
1302 	if (k1_enable)
1303 		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
1304 	else
1305 		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
1306 
1307 	ret_val = e1000_write_kmrn_reg_locked(hw,
1308 	                                      E1000_KMRNCTRLSTA_K1_CONFIG,
1309 	                                      kmrn_reg);
1310 	if (ret_val)
1311 		goto out;
1312 
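	/*
	 * Momentarily force the MAC speed configuration (FRCSPD plus the
	 * speed-select bypass in CTRL_EXT) so the updated K1 setting takes
	 * effect on the MAC/PHY interconnect, then restore the original
	 * CTRL and CTRL_EXT values.
	 */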
1313 	usec_delay(20);
1314 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1315 	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1316 
1317 	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1318 	reg |= E1000_CTRL_FRCSPD;
1319 	E1000_WRITE_REG(hw, E1000_CTRL, reg);
1320 
1321 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
1322 	usec_delay(20);
1323 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1324 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1325 	usec_delay(20);
1326 
1327 out:
1328 	return ret_val;
1329 }
1330 
1331 /**
1332  *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
1333  *  @hw:       pointer to the HW structure
1334  *  @d0_state: boolean if entering d0 or d3 device state
1335  *
1336  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
1337  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
1338  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
1339  **/
1340 s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1341 {
1342 	s32 ret_val = 0;
1343 	u32 mac_reg;
1344 	u16 oem_reg;
1345 
1346 	DEBUGFUNC("e1000_oem_bits_config_ich8lan");
1347 
1348 	if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan))
1349 		return ret_val;
1350 
1351 	ret_val = hw->phy.ops.acquire(hw);
1352 	if (ret_val)
1353 		return ret_val;
1354 
1355 	if (!(hw->mac.type == e1000_pch2lan)) {
1356 		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1357 		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1358 			goto out;
1359 	}
1360 
1361 	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
1362 	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
1363 		goto out;
1364 
1365 	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
1366 
1367 	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
1368 	if (ret_val)
1369 		goto out;
1370 
1371 	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
1372 
1373 	if (d0_state) {
1374 		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
1375 			oem_reg |= HV_OEM_BITS_GBE_DIS;
1376 
1377 		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1378 			oem_reg |= HV_OEM_BITS_LPLU;
1379 	} else {
1380 		if (mac_reg & E1000_PHY_CTRL_NOND0A_GBE_DISABLE)
1381 			oem_reg |= HV_OEM_BITS_GBE_DIS;
1382 
1383 		if (mac_reg & E1000_PHY_CTRL_NOND0A_LPLU)
1384 			oem_reg |= HV_OEM_BITS_LPLU;
1385 	}
1386 	/* Restart auto-neg to activate the bits */
1387 	if (!hw->phy.ops.check_reset_block(hw))
1388 		oem_reg |= HV_OEM_BITS_RESTART_AN;
1389 	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
1390 
1391 out:
1392 	hw->phy.ops.release(hw);
1393 
1394 	return ret_val;
1395 }
1396 
1397 
1398 /**
1399  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
1400  *  @hw:   pointer to the HW structure
1401  **/
1402 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
1403 {
1404 	s32 ret_val;
1405 	u16 data;
1406 
1407 	DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
1408 
1409 	ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
1410 	if (ret_val)
1411 		return ret_val;
1412 
1413 	data |= HV_KMRN_MDIO_SLOW;
1414 
1415 	ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
1416 
1417 	return ret_val;
1418 }
1419 
1420 /**
1421  *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1422  *  done after every PHY reset.
1423  **/
1424 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1425 {
1426 	s32 ret_val = E1000_SUCCESS;
1427 	u16 phy_data;
1428 
1429 	DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
1430 
1431 	if (hw->mac.type != e1000_pchlan)
1432 		goto out;
1433 
1434 	/* Set MDIO slow mode before any other MDIO access */
1435 	if (hw->phy.type == e1000_phy_82577) {
1436 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
1437 		if (ret_val)
1438 			goto out;
1439 	}
1440 
1441 	if (((hw->phy.type == e1000_phy_82577) &&
1442 	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
1443 	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
1444 		/* Disable generation of early preamble */
1445 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
1446 		if (ret_val)
1447 			goto out;
1448 
1449 		/* Preamble tuning for SSC */
1450 		ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);
1451 		if (ret_val)
1452 			goto out;
1453 	}
1454 
1455 	if (hw->phy.type == e1000_phy_82578) {
1456 		/*
1457 		 * Return registers to default by doing a soft reset then
1458 		 * writing 0x3140 to the control register.
1459 		 */
1460 		if (hw->phy.revision < 2) {
1461 			e1000_phy_sw_reset_generic(hw);
1462 			ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
1463 			                                0x3140);
1464 		}
1465 	}
1466 
1467 	/* Select page 0 */
1468 	ret_val = hw->phy.ops.acquire(hw);
1469 	if (ret_val)
1470 		goto out;
1471 
1472 	hw->phy.addr = 1;
1473 	ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
1474 	hw->phy.ops.release(hw);
1475 	if (ret_val)
1476 		goto out;
1477 
1478 	/*
1479 	 * Configure the K1 Si workaround during phy reset assuming there is
1480 	 * link so that it disables K1 if link is in 1Gbps.
1481 	 */
1482 	ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
1483 	if (ret_val)
1484 		goto out;
1485 
1486 	/* Workaround for link disconnects on a busy hub in half duplex */
1487 	ret_val = hw->phy.ops.acquire(hw);
1488 	if (ret_val)
1489 		goto out;
1490 	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG_REG,
1491 	                                      &phy_data);
1492 	if (ret_val)
1493 		goto release;
1494 	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG_REG,
1495 	                                       phy_data & 0x00FF);
1496 release:
1497 	hw->phy.ops.release(hw);
1498 out:
1499 	return ret_val;
1500 }
1501 
1502 /**
1503  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
1504  *  @hw:   pointer to the HW structure
1505  **/
1506 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
1507 {
1508 	u32 mac_reg;
1509 	u16 i, phy_reg = 0;
1510 	s32 ret_val;
1511 
1512 	DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
1513 
1514 	ret_val = hw->phy.ops.acquire(hw);
1515 	if (ret_val)
1516 		return;
1517 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1518 	if (ret_val)
1519 		goto release;
1520 
1521 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
1522 	for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1523 		mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
1524 		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
1525 		                           (u16)(mac_reg & 0xFFFF));
1526 		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
1527 		                           (u16)((mac_reg >> 16) & 0xFFFF));
1528 
1529 		mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
1530 		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
1531 		                           (u16)(mac_reg & 0xFFFF));
1532 		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
1533 		                           (u16)((mac_reg & E1000_RAH_AV)
1534 		                                 >> 16));
1535 	}
1536 
1537 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1538 
1539 release:
1540 	hw->phy.ops.release(hw);
1541 }
1542 
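/**
 *  e1000_calc_rx_da_crc - Calculate CRC-32 of a receive destination address
 *  @mac: 6-byte (destination) MAC address
 *
 *  Bitwise, LSB-first CRC-32 using the reflected IEEE 802.3 polynomial
 *  (0xEDB88320).  Used to seed the Rx address initial CRC registers
 *  (PCH_RAICC) in the jumbo frame workaround below.
 **/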
1543 static u32 e1000_calc_rx_da_crc(u8 mac[])
1544 {
1545 	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
1546 	u32 i, j, mask, crc;
1547 
1548 	DEBUGFUNC("e1000_calc_rx_da_crc");
1549 
1550 	crc = 0xffffffff;
1551 	for (i = 0; i < 6; i++) {
1552 		crc = crc ^ mac[i];
1553 		for (j = 8; j > 0; j--) {
1554 			mask = (crc & 1) * (-1);
1555 			crc = (crc >> 1) ^ (poly & mask);
1556 		}
1557 	}
1558 	return ~crc;
1559 }
1560 
1561 /**
1562  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
1563  *  with 82579 PHY
1564  *  @hw: pointer to the HW structure
1565  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
1566  **/
1567 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1568 {
1569 	s32 ret_val = E1000_SUCCESS;
1570 	u16 phy_reg, data;
1571 	u32 mac_reg;
1572 	u16 i;
1573 
1574 	DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
1575 
1576 	if ((hw->mac.type != e1000_pch2lan) &&
1577 	    (hw->phy.type != e1000_phy_82579))
1578 		goto out;
1579 
1580 	/* disable Rx path while enabling/disabling workaround */
1581 	hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
1582 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
1583 	if (ret_val)
1584 		goto out;
1585 
1586 	if (enable) {
1587 		/*
1588 		 * Write Rx addresses (rar_entry_count for RAL/H, +4 for
1589 		 * SHRAL/H) and initial CRC values to the MAC
1590 		 */
1591 		for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1592 			u8 mac_addr[ETH_ADDR_LEN] = {0};
1593 			u32 addr_high, addr_low;
1594 
1595 			addr_high = E1000_READ_REG(hw, E1000_RAH(i));
1596 			if (!(addr_high & E1000_RAH_AV))
1597 				continue;
1598 			addr_low = E1000_READ_REG(hw, E1000_RAL(i));
1599 			mac_addr[0] = (addr_low & 0xFF);
1600 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
1601 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
1602 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
1603 			mac_addr[4] = (addr_high & 0xFF);
1604 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
1605 
1606 			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
1607 					e1000_calc_rx_da_crc(mac_addr));
1608 		}
1609 
1610 		/* Write Rx addresses to the PHY */
1611 		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
1612 
1613 		/* Enable jumbo frame workaround in the MAC */
1614 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1615 		mac_reg &= ~(1 << 14);
1616 		mac_reg |= (7 << 15);
1617 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
1618 
1619 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1620 		mac_reg |= E1000_RCTL_SECRC;
1621 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1622 
1623 		ret_val = e1000_read_kmrn_reg_generic(hw,
1624 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1625 						&data);
1626 		if (ret_val)
1627 			goto out;
1628 		ret_val = e1000_write_kmrn_reg_generic(hw,
1629 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1630 						data | (1 << 0));
1631 		if (ret_val)
1632 			goto out;
1633 		ret_val = e1000_read_kmrn_reg_generic(hw,
1634 						E1000_KMRNCTRLSTA_HD_CTRL,
1635 						&data);
1636 		if (ret_val)
1637 			goto out;
1638 		data &= ~(0xF << 8);
1639 		data |= (0xB << 8);
1640 		ret_val = e1000_write_kmrn_reg_generic(hw,
1641 						E1000_KMRNCTRLSTA_HD_CTRL,
1642 						data);
1643 		if (ret_val)
1644 			goto out;
1645 
1646 		/* Enable jumbo frame workaround in the PHY */
1647 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1648 		data &= ~(0x7F << 5);
1649 		data |= (0x37 << 5);
1650 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1651 		if (ret_val)
1652 			goto out;
1653 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1654 		data &= ~(1 << 13);
1655 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1656 		if (ret_val)
1657 			goto out;
1658 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1659 		data &= ~(0x3FF << 2);
1660 		data |= (0x1A << 2);
1661 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1662 		if (ret_val)
1663 			goto out;
1664 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xFE00);
1665 		if (ret_val)
1666 			goto out;
1667 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1668 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data | (1 << 10));
1669 		if (ret_val)
1670 			goto out;
1671 	} else {
1672 		/* Write MAC register values back to h/w defaults */
1673 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1674 		mac_reg &= ~(0xF << 14);
1675 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
1676 
1677 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1678 		mac_reg &= ~E1000_RCTL_SECRC;
1679 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1680 
1681 		ret_val = e1000_read_kmrn_reg_generic(hw,
1682 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1683 						&data);
1684 		if (ret_val)
1685 			goto out;
1686 		ret_val = e1000_write_kmrn_reg_generic(hw,
1687 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
1688 						data & ~(1 << 0));
1689 		if (ret_val)
1690 			goto out;
1691 		ret_val = e1000_read_kmrn_reg_generic(hw,
1692 						E1000_KMRNCTRLSTA_HD_CTRL,
1693 						&data);
1694 		if (ret_val)
1695 			goto out;
1696 		data &= ~(0xF << 8);
1697 		data |= (0xB << 8);
1698 		ret_val = e1000_write_kmrn_reg_generic(hw,
1699 						E1000_KMRNCTRLSTA_HD_CTRL,
1700 						data);
1701 		if (ret_val)
1702 			goto out;
1703 
1704 		/* Write PHY register values back to h/w defaults */
1705 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1706 		data &= ~(0x7F << 5);
1707 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1708 		if (ret_val)
1709 			goto out;
1710 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1711 		data |= (1 << 13);
1712 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1713 		if (ret_val)
1714 			goto out;
1715 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1716 		data &= ~(0x3FF << 2);
1717 		data |= (0x8 << 2);
1718 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1719 		if (ret_val)
1720 			goto out;
1721 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
1722 		if (ret_val)
1723 			goto out;
1724 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1725 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data & ~(1 << 10));
1726 		if (ret_val)
1727 			goto out;
1728 	}
1729 
1730 	/* re-enable Rx path after enabling/disabling workaround */
1731 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
1732 
1733 out:
1734 	return ret_val;
1735 }
1736 
1737 /**
1738  *  e1000_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
1739  *  done after every PHY reset.
      *  @hw: pointer to the HW structure
1740  **/
1741 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1742 {
1743 	s32 ret_val = E1000_SUCCESS;
1744 
1745 	DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
1746 
1747 	if (hw->mac.type != e1000_pch2lan)
1748 		goto out;
1749 
1750 	/* Set MDIO slow mode before any other MDIO access */
1751 	ret_val = e1000_set_mdio_slow_mode_hv(hw);
1752 
1753 out:
1754 	return ret_val;
1755 }
1756 
1757 /**
1758  *  e1000_k1_workaround_lv - K1 Si workaround
1759  *  @hw:   pointer to the HW structure
1760  *
1761  *  Workaround to set the K1 beacon duration for 82579 parts
1762  **/
1763 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
1764 {
1765 	s32 ret_val = E1000_SUCCESS;
1766 	u16 status_reg = 0;
1767 	u32 mac_reg;
1768 
1769 	DEBUGFUNC("e1000_k1_workaround_lv");
1770 
1771 	if (hw->mac.type != e1000_pch2lan)
1772 		goto out;
1773 
1774 	/* Set K1 beacon duration based on 1Gbps speed or otherwise */
1775 	ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
1776 	if (ret_val)
1777 		goto out;
1778 
1779 	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
1780 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
1781 		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1782 		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1783 
1784 		if (status_reg & HV_M_STATUS_SPEED_1000)
1785 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1786 		else
1787 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
1788 
1789 		E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1790 	}
1791 
1792 out:
1793 	return ret_val;
1794 }
1795 
1796 /**
1797  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
1798  *  @hw:   pointer to the HW structure
1799  *  @gate: boolean set to TRUE to gate, FALSE to ungate
1800  *
1801  *  Gate/ungate the automatic PHY configuration via hardware; perform
1802  *  the configuration via software instead.
1803  **/
1804 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
1805 {
1806 	u32 extcnf_ctrl;
1807 
1808 	DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
1809 
1810 	if (hw->mac.type != e1000_pch2lan)
1811 		return;
1812 
1813 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1814 
1815 	if (gate)
1816 		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1817 	else
1818 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1819 
1820 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1821 	return;
1822 }
1823 
1824 /**
1825  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
1826  *  @hw: pointer to the HW structure
1827  *
1828  *  Check the appropriate indication the MAC has finished configuring the
1829  *  PHY after a software reset.
1830  **/
1831 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
1832 {
1833 	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
1834 
1835 	DEBUGFUNC("e1000_lan_init_done_ich8lan");
1836 
1837 	/* Wait for basic configuration completes before proceeding */
1838 	do {
1839 		data = E1000_READ_REG(hw, E1000_STATUS);
1840 		data &= E1000_STATUS_LAN_INIT_DONE;
1841 		usec_delay(100);
1842 	} while ((!data) && --loop);
1843 
1844 	/*
1845 	 * If basic configuration is incomplete before the above loop
1846 	 * count reaches 0, loading the configuration from NVM will
1847 	 * leave the PHY in a bad state possibly resulting in no link.
1848 	 */
1849 	if (loop == 0)
1850 		DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
1851 
1852 	/* Clear the Init Done bit for the next init event */
1853 	data = E1000_READ_REG(hw, E1000_STATUS);
1854 	data &= ~E1000_STATUS_LAN_INIT_DONE;
1855 	E1000_WRITE_REG(hw, E1000_STATUS, data);
1856 }
1857 
1858 /**
1859  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
1860  *  @hw: pointer to the HW structure
1861  **/
1862 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1863 {
1864 	s32 ret_val = E1000_SUCCESS;
1865 	u16 reg;
1866 
1867 	DEBUGFUNC("e1000_post_phy_reset_ich8lan");
1868 
1869 	if (hw->phy.ops.check_reset_block(hw))
1870 		goto out;
1871 
1872 	/* Allow time for h/w to get to quiescent state after reset */
1873 	msec_delay(10);
1874 
1875 	/* Perform any necessary post-reset workarounds */
1876 	switch (hw->mac.type) {
1877 	case e1000_pchlan:
1878 		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
1879 		if (ret_val)
1880 			goto out;
1881 		break;
1882 	case e1000_pch2lan:
1883 		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
1884 		if (ret_val)
1885 			goto out;
1886 		break;
1887 	default:
1888 		break;
1889 	}
1890 
1891 	/* Clear the host wakeup bit after lcd reset */
1892 	if (hw->mac.type >= e1000_pchlan) {
1893 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
1894 		reg &= ~BM_WUC_HOST_WU_BIT;
1895 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
1896 	}
1897 
1898 	/* Configure the LCD with the extended configuration region in NVM */
1899 	ret_val = e1000_sw_lcd_config_ich8lan(hw);
1900 	if (ret_val)
1901 		goto out;
1902 
1903 	/* Configure the LCD with the OEM bits in NVM */
1904 	ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
1905 
1906 	if (hw->mac.type == e1000_pch2lan) {
1907 		/* Ungate automatic PHY configuration on non-managed 82579 */
1908 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
1909 		    E1000_ICH_FWSM_FW_VALID)) {
1910 			msec_delay(10);
1911 			e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
1912 		}
1913 
1914 		/* Set EEE LPI Update Timer to 200usec */
1915 		ret_val = hw->phy.ops.acquire(hw);
1916 		if (ret_val)
1917 			goto out;
1918 		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
1919 						       I82579_LPI_UPDATE_TIMER);
1920 		if (ret_val)
1921 			goto release;
1922 		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
1923 						       0x1387);
1924 release:
1925 		hw->phy.ops.release(hw);
1926 	}
1927 
1928 out:
1929 	return ret_val;
1930 }
1931 
1932 /**
1933  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
1934  *  @hw: pointer to the HW structure
1935  *
1936  *  Resets the PHY
1937  *  This is a function pointer entry point called by drivers
1938  *  or other shared routines.
1939  **/
1940 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
1941 {
1942 	s32 ret_val = E1000_SUCCESS;
1943 
1944 	DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
1945 
1946 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
1947 	if ((hw->mac.type == e1000_pch2lan) &&
1948 	    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
1949 		e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
1950 
1951 	ret_val = e1000_phy_hw_reset_generic(hw);
1952 	if (ret_val)
1953 		goto out;
1954 
1955 	ret_val = e1000_post_phy_reset_ich8lan(hw);
1956 
1957 out:
1958 	return ret_val;
1959 }
1960 
1961 /**
1962  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
1963  *  @hw: pointer to the HW structure
1964  *  @active: TRUE to enable LPLU, FALSE to disable
1965  *
1966  *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
1967  *  bits are disabled in the NVM, writing the LPLU bits in the MAC will not set
1968  *  the phy speed. This function will manually set the LPLU bit and restart
1969  *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
1970  *  since it configures the same bit.
1971  **/
1972 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
1973 {
1974 	s32 ret_val = E1000_SUCCESS;
1975 	u16 oem_reg;
1976 
1977 	DEBUGFUNC("e1000_set_lplu_state_pchlan");
1978 
1979 	ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
1980 	if (ret_val)
1981 		goto out;
1982 
1983 	if (active)
1984 		oem_reg |= HV_OEM_BITS_LPLU;
1985 	else
1986 		oem_reg &= ~HV_OEM_BITS_LPLU;
1987 
1988 	oem_reg |= HV_OEM_BITS_RESTART_AN;
1989 	ret_val = hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
1990 
1991 out:
1992 	return ret_val;
1993 }
1994 
1995 /**
1996  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
1997  *  @hw: pointer to the HW structure
1998  *  @active: TRUE to enable LPLU, FALSE to disable
1999  *
2000  *  Sets the LPLU D0 state according to the active flag.  When
2001  *  activating LPLU this function also disables smart speed
2002  *  and vice versa.  LPLU will not be activated unless the
2003  *  device autonegotiation advertisement meets standards of
2004  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
2005  *  This is a function pointer entry point only called by
2006  *  PHY setup routines.
2007  **/
2008 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2009 {
2010 	struct e1000_phy_info *phy = &hw->phy;
2011 	u32 phy_ctrl;
2012 	s32 ret_val = E1000_SUCCESS;
2013 	u16 data;
2014 
2015 	DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
2016 
2017 	if (phy->type == e1000_phy_ife)
2018 		goto out;
2019 
2020 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2021 
2022 	if (active) {
2023 		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2024 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2025 
2026 		if (phy->type != e1000_phy_igp_3)
2027 			goto out;
2028 
2029 		/*
2030 		 * Call gig speed drop workaround on LPLU before accessing
2031 		 * any PHY registers
2032 		 */
2033 		if (hw->mac.type == e1000_ich8lan)
2034 			e1000_gig_downshift_workaround_ich8lan(hw);
2035 
2036 		/* When LPLU is enabled, we should disable SmartSpeed */
2037 		ret_val = phy->ops.read_reg(hw,
2038 		                            IGP01E1000_PHY_PORT_CONFIG,
2039 		                            &data);
2040 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2041 		ret_val = phy->ops.write_reg(hw,
2042 		                             IGP01E1000_PHY_PORT_CONFIG,
2043 		                             data);
2044 		if (ret_val)
2045 			goto out;
2046 	} else {
2047 		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2048 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2049 
2050 		if (phy->type != e1000_phy_igp_3)
2051 			goto out;
2052 
2053 		/*
2054 		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
2055 		 * during Dx states where the power conservation is most
2056 		 * important.  During driver activity we should enable
2057 		 * SmartSpeed, so performance is maintained.
2058 		 */
2059 		if (phy->smart_speed == e1000_smart_speed_on) {
2060 			ret_val = phy->ops.read_reg(hw,
2061 			                            IGP01E1000_PHY_PORT_CONFIG,
2062 			                            &data);
2063 			if (ret_val)
2064 				goto out;
2065 
2066 			data |= IGP01E1000_PSCFR_SMART_SPEED;
2067 			ret_val = phy->ops.write_reg(hw,
2068 			                             IGP01E1000_PHY_PORT_CONFIG,
2069 			                             data);
2070 			if (ret_val)
2071 				goto out;
2072 		} else if (phy->smart_speed == e1000_smart_speed_off) {
2073 			ret_val = phy->ops.read_reg(hw,
2074 			                            IGP01E1000_PHY_PORT_CONFIG,
2075 			                            &data);
2076 			if (ret_val)
2077 				goto out;
2078 
2079 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2080 			ret_val = phy->ops.write_reg(hw,
2081 			                             IGP01E1000_PHY_PORT_CONFIG,
2082 			                             data);
2083 			if (ret_val)
2084 				goto out;
2085 		}
2086 	}
2087 
2088 out:
2089 	return ret_val;
2090 }
2091 
2092 /**
2093  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
2094  *  @hw: pointer to the HW structure
2095  *  @active: TRUE to enable LPLU, FALSE to disable
2096  *
2097  *  Sets the LPLU D3 state according to the active flag.  When
2098  *  activating LPLU this function also disables smart speed
2099  *  and vice versa.  LPLU will not be activated unless the
2100  *  device autonegotiation advertisement meets standards of
2101  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
2102  *  This is a function pointer entry point only called by
2103  *  PHY setup routines.
2104  **/
2105 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2106 {
2107 	struct e1000_phy_info *phy = &hw->phy;
2108 	u32 phy_ctrl;
2109 	s32 ret_val = E1000_SUCCESS;
2110 	u16 data;
2111 
2112 	DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
2113 
2114 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2115 
2116 	if (!active) {
2117 		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
2118 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2119 
2120 		if (phy->type != e1000_phy_igp_3)
2121 			goto out;
2122 
2123 		/*
2124 		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
2125 		 * during Dx states where the power conservation is most
2126 		 * important.  During driver activity we should enable
2127 		 * SmartSpeed, so performance is maintained.
2128 		 */
2129 		if (phy->smart_speed == e1000_smart_speed_on) {
2130 			ret_val = phy->ops.read_reg(hw,
2131 			                            IGP01E1000_PHY_PORT_CONFIG,
2132 			                            &data);
2133 			if (ret_val)
2134 				goto out;
2135 
2136 			data |= IGP01E1000_PSCFR_SMART_SPEED;
2137 			ret_val = phy->ops.write_reg(hw,
2138 			                             IGP01E1000_PHY_PORT_CONFIG,
2139 			                             data);
2140 			if (ret_val)
2141 				goto out;
2142 		} else if (phy->smart_speed == e1000_smart_speed_off) {
2143 			ret_val = phy->ops.read_reg(hw,
2144 			                            IGP01E1000_PHY_PORT_CONFIG,
2145 			                            &data);
2146 			if (ret_val)
2147 				goto out;
2148 
2149 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2150 			ret_val = phy->ops.write_reg(hw,
2151 			                             IGP01E1000_PHY_PORT_CONFIG,
2152 			                             data);
2153 			if (ret_val)
2154 				goto out;
2155 		}
2156 	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
2157 	           (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
2158 	           (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
2159 		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
2160 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2161 
2162 		if (phy->type != e1000_phy_igp_3)
2163 			goto out;
2164 
2165 		/*
2166 		 * Call gig speed drop workaround on LPLU before accessing
2167 		 * any PHY registers
2168 		 */
2169 		if (hw->mac.type == e1000_ich8lan)
2170 			e1000_gig_downshift_workaround_ich8lan(hw);
2171 
2172 		/* When LPLU is enabled, we should disable SmartSpeed */
2173 		ret_val = phy->ops.read_reg(hw,
2174 		                            IGP01E1000_PHY_PORT_CONFIG,
2175 		                            &data);
2176 		if (ret_val)
2177 			goto out;
2178 
2179 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2180 		ret_val = phy->ops.write_reg(hw,
2181 		                             IGP01E1000_PHY_PORT_CONFIG,
2182 		                             data);
2183 	}
2184 
2185 out:
2186 	return ret_val;
2187 }
2188 
2189 /**
2190  *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
2191  *  @hw: pointer to the HW structure
2192  *  @bank:  pointer to the variable that returns the active bank
2193  *
2194  *  Reads signature byte from the NVM using the flash access registers.
2195  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
2196  **/
2197 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
2198 {
2199 	u32 eecd;
2200 	struct e1000_nvm_info *nvm = &hw->nvm;
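     	/*
     	 * flash_bank_size is in words, so bank 1 starts 2 * flash_bank_size
     	 * bytes into the flash; act_offset is the byte address of the high
     	 * byte of the signature word (0x13) within a bank.
     	 */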
2201 	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
2202 	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
2203 	u8 sig_byte = 0;
2204 	s32 ret_val = E1000_SUCCESS;
2205 
2206 	DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
2207 
2208 	switch (hw->mac.type) {
2209 	case e1000_ich8lan:
2210 	case e1000_ich9lan:
2211 		eecd = E1000_READ_REG(hw, E1000_EECD);
2212 		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
2213 		    E1000_EECD_SEC1VAL_VALID_MASK) {
2214 			if (eecd & E1000_EECD_SEC1VAL)
2215 				*bank = 1;
2216 			else
2217 				*bank = 0;
2218 
2219 			goto out;
2220 		}
2221 		DEBUGOUT("Unable to determine valid NVM bank via EEC - "
2222 		         "reading flash signature\n");
2223 		/* fall-thru */
2224 	default:
2225 		/* set bank to 0 in case flash read fails */
2226 		*bank = 0;
2227 
2228 		/* Check bank 0 */
2229 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
2230 		                                        &sig_byte);
2231 		if (ret_val)
2232 			goto out;
2233 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2234 		    E1000_ICH_NVM_SIG_VALUE) {
2235 			*bank = 0;
2236 			goto out;
2237 		}
2238 
2239 		/* Check bank 1 */
2240 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
2241 		                                        bank1_offset,
2242 		                                        &sig_byte);
2243 		if (ret_val)
2244 			goto out;
2245 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2246 		    E1000_ICH_NVM_SIG_VALUE) {
2247 			*bank = 1;
2248 			goto out;
2249 		}
2250 
2251 		DEBUGOUT("ERROR: No valid NVM bank present\n");
2252 		ret_val = -E1000_ERR_NVM;
2253 		break;
2254 	}
2255 out:
2256 	return ret_val;
2257 }
2258 
2259 /**
2260  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
2261  *  @hw: pointer to the HW structure
2262  *  @offset: The offset (in words) of the word(s) to read.
2263  *  @words: Size of data to read in words
2264  *  @data: Pointer to the word(s) to read at offset.
2265  *
2266  *  Reads a word(s) from the NVM using the flash access registers.
2267  **/
2268 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2269                                   u16 *data)
2270 {
2271 	struct e1000_nvm_info *nvm = &hw->nvm;
2272 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2273 	u32 act_offset;
2274 	s32 ret_val = E1000_SUCCESS;
2275 	u32 bank = 0;
2276 	u16 i, word;
2277 
2278 	DEBUGFUNC("e1000_read_nvm_ich8lan");
2279 
2280 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2281 	    (words == 0)) {
2282 		DEBUGOUT("nvm parameter(s) out of bounds\n");
2283 		ret_val = -E1000_ERR_NVM;
2284 		goto out;
2285 	}
2286 
2287 	nvm->ops.acquire(hw);
2288 
2289 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2290 	if (ret_val != E1000_SUCCESS) {
2291 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
2292 		bank = 0;
2293 	}
2294 
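     	/* act_offset is a word offset into the selected flash bank */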
2295 	act_offset = (bank) ? nvm->flash_bank_size : 0;
2296 	act_offset += offset;
2297 
2298 	ret_val = E1000_SUCCESS;
2299 	for (i = 0; i < words; i++) {
2300 		if ((dev_spec->shadow_ram) &&
2301 		    (dev_spec->shadow_ram[offset+i].modified)) {
2302 			data[i] = dev_spec->shadow_ram[offset+i].value;
2303 		} else {
2304 			ret_val = e1000_read_flash_word_ich8lan(hw,
2305 			                                        act_offset + i,
2306 			                                        &word);
2307 			if (ret_val)
2308 				break;
2309 			data[i] = word;
2310 		}
2311 	}
2312 
2313 	nvm->ops.release(hw);
2314 
2315 out:
2316 	if (ret_val)
2317 		DEBUGOUT1("NVM read error: %d\n", ret_val);
2318 
2319 	return ret_val;
2320 }
2321 
2322 /**
2323  *  e1000_flash_cycle_init_ich8lan - Initialize flash
2324  *  @hw: pointer to the HW structure
2325  *
2326  *  This function does initial flash setup so that a new read/write/erase cycle
2327  *  can be started.
2328  **/
2329 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2330 {
2331 	union ich8_hws_flash_status hsfsts;
2332 	s32 ret_val = -E1000_ERR_NVM;
2333 
2334 	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
2335 
2336 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2337 
2338 	/* Check if the flash descriptor is valid */
2339 	if (hsfsts.hsf_status.fldesvalid == 0) {
2340 		DEBUGOUT("Flash descriptor invalid.  "
2341 		         "SW Sequencing must be used.");
2342 		goto out;
2343 	}
2344 
2345 	/* Clear FCERR and DAEL in hw status by writing 1 */
2346 	hsfsts.hsf_status.flcerr = 1;
2347 	hsfsts.hsf_status.dael = 1;
2348 
2349 	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2350 
2351 	/*
2352 	 * Either we should have a hardware SPI cycle in progress
2353 	 * bit to check against, in order to start a new cycle or
2354 	 * FDONE bit should be changed in the hardware so that it
2355 	 * is 1 after hardware reset, which can then be used as an
2356 	 * indication whether a cycle is in progress or has been
2357 	 * completed.
2358 	 */
2359 
2360 	if (hsfsts.hsf_status.flcinprog == 0) {
2361 		/*
2362 		 * There is no cycle running at present,
2363 		 * so we can start a cycle.
2364 		 * Begin by setting Flash Cycle Done.
2365 		 */
2366 		hsfsts.hsf_status.flcdone = 1;
2367 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2368 		ret_val = E1000_SUCCESS;
2369 	} else {
2370 		s32 i;
2371 
2372 		/*
2373 		 * Otherwise poll for some time so the current
2374 		 * cycle has a chance to end before giving up.
2375 		 */
2376 		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
2377 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2378 			                                      ICH_FLASH_HSFSTS);
2379 			if (hsfsts.hsf_status.flcinprog == 0) {
2380 				ret_val = E1000_SUCCESS;
2381 				break;
2382 			}
2383 			usec_delay(1);
2384 		}
2385 		if (ret_val == E1000_SUCCESS) {
2386 			/*
2387 			 * Successful in waiting for previous cycle to timeout,
2388 			 * now set the Flash Cycle Done.
2389 			 */
2390 			hsfsts.hsf_status.flcdone = 1;
2391 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
2392 			                        hsfsts.regval);
2393 		} else {
2394 			DEBUGOUT("Flash controller busy, cannot get access");
2395 		}
2396 	}
2397 
2398 out:
2399 	return ret_val;
2400 }
2401 
2402 /**
2403  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
2404  *  @hw: pointer to the HW structure
2405  *  @timeout: maximum time to wait for completion
2406  *
2407  *  This function starts a flash cycle and waits for its completion.
2408  **/
2409 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
2410 {
2411 	union ich8_hws_flash_ctrl hsflctl;
2412 	union ich8_hws_flash_status hsfsts;
2413 	s32 ret_val = -E1000_ERR_NVM;
2414 	u32 i = 0;
2415 
2416 	DEBUGFUNC("e1000_flash_cycle_ich8lan");
2417 
2418 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
2419 	hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2420 	hsflctl.hsf_ctrl.flcgo = 1;
2421 	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2422 
2423 	/* wait till FDONE bit is set to 1 */
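     	/* (the poll below delays 1 usec per iteration, so timeout is in usec) */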
2424 	do {
2425 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2426 		if (hsfsts.hsf_status.flcdone == 1)
2427 			break;
2428 		usec_delay(1);
2429 	} while (i++ < timeout);
2430 
2431 	if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
2432 		ret_val = E1000_SUCCESS;
2433 
2434 	return ret_val;
2435 }
2436 
2437 /**
2438  *  e1000_read_flash_word_ich8lan - Read word from flash
2439  *  @hw: pointer to the HW structure
2440  *  @offset: offset to data location
2441  *  @data: pointer to the location for storing the data
2442  *
2443  *  Reads the flash word at offset into data.  Offset is converted
2444  *  to bytes before read.
2445  **/
2446 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
2447                                          u16 *data)
2448 {
2449 	s32 ret_val;
2450 
2451 	DEBUGFUNC("e1000_read_flash_word_ich8lan");
2452 
2453 	if (!data) {
2454 		ret_val = -E1000_ERR_NVM;
2455 		goto out;
2456 	}
2457 
2458 	/* Must convert offset into bytes. */
2459 	offset <<= 1;
2460 
2461 	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 2, data);
2462 
2463 out:
2464 	return ret_val;
2465 }
2466 
2467 /**
2468  *  e1000_read_flash_byte_ich8lan - Read byte from flash
2469  *  @hw: pointer to the HW structure
2470  *  @offset: The offset of the byte to read.
2471  *  @data: Pointer to a byte to store the value read.
2472  *
2473  *  Reads a single byte from the NVM using the flash access registers.
2474  **/
2475 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2476                                          u8 *data)
2477 {
2478 	s32 ret_val = E1000_SUCCESS;
2479 	u16 word = 0;
2480 
2481 	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
2482 	if (ret_val)
2483 		goto out;
2484 
2485 	*data = (u8)word;
2486 
2487 out:
2488 	return ret_val;
2489 }
2490 
2491 /**
2492  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
2493  *  @hw: pointer to the HW structure
2494  *  @offset: The offset (in bytes) of the byte or word to read.
2495  *  @size: Size of data to read, 1=byte 2=word
2496  *  @data: Pointer to the word to store the value read.
2497  *
2498  *  Reads a byte or word from the NVM using the flash access registers.
2499  **/
2500 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2501                                          u8 size, u16 *data)
2502 {
2503 	union ich8_hws_flash_status hsfsts;
2504 	union ich8_hws_flash_ctrl hsflctl;
2505 	u32 flash_linear_addr;
2506 	u32 flash_data = 0;
2507 	s32 ret_val = -E1000_ERR_NVM;
2508 	u8 count = 0;
2509 
2510 	DEBUGFUNC("e1000_read_flash_data_ich8lan");
2511 
2512 	if (size < 1  || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
2513 		goto out;
2514 
2515 	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2516 	                    hw->nvm.flash_base_addr;
2517 
2518 	do {
2519 		usec_delay(1);
2520 		/* Steps */
2521 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
2522 		if (ret_val != E1000_SUCCESS)
2523 			break;
2524 
2525 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2526 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2527 		hsflctl.hsf_ctrl.fldbcount = size - 1;
2528 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
2529 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2530 
2531 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
2532 
2533 		ret_val = e1000_flash_cycle_ich8lan(hw,
2534 		                                ICH_FLASH_READ_COMMAND_TIMEOUT);
2535 
2536 		/*
2537 		 * Check if FCERR is set to 1; if so, clear it and try
2538 		 * the whole sequence a few more times.  Otherwise read
2539 		 * in (shift in) the Flash Data0; the data is returned
2540 		 * least significant byte first.
2541 		 */
2542 		if (ret_val == E1000_SUCCESS) {
2543 			flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
2544 			if (size == 1)
2545 				*data = (u8)(flash_data & 0x000000FF);
2546 			else if (size == 2)
2547 				*data = (u16)(flash_data & 0x0000FFFF);
2548 			break;
2549 		} else {
2550 			/*
2551 			 * If we've gotten here, then things are probably
2552 			 * completely hosed, but if the error condition is
2553 			 * detected, it won't hurt to give it another try...
2554 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
2555 			 */
2556 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2557 			                                      ICH_FLASH_HSFSTS);
2558 			if (hsfsts.hsf_status.flcerr == 1) {
2559 				/* Repeat for some time before giving up. */
2560 				continue;
2561 			} else if (hsfsts.hsf_status.flcdone == 0) {
2562 				DEBUGOUT("Timeout error - flash cycle "
2563 				         "did not complete.");
2564 				break;
2565 			}
2566 		}
2567 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2568 
2569 out:
2570 	return ret_val;
2571 }
2572 
2573 /**
2574  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
2575  *  @hw: pointer to the HW structure
2576  *  @offset: The offset (in bytes) of the word(s) to write.
2577  *  @words: Size of data to write in words
2578  *  @data: Pointer to the word(s) to write at offset.
2579  *
2580  *  Writes a byte or word to the NVM using the flash access registers.
2581  **/
2582 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2583                                    u16 *data)
2584 {
2585 	struct e1000_nvm_info *nvm = &hw->nvm;
2586 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2587 	s32 ret_val = E1000_SUCCESS;
2588 	u16 i;
2589 
2590 	DEBUGFUNC("e1000_write_nvm_ich8lan");
2591 
2592 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2593 	    (words == 0)) {
2594 		DEBUGOUT("nvm parameter(s) out of bounds\n");
2595 		ret_val = -E1000_ERR_NVM;
2596 		goto out;
2597 	}
2598 
2599 	nvm->ops.acquire(hw);
2600 
2601 	for (i = 0; i < words; i++) {
2602 		dev_spec->shadow_ram[offset+i].modified = TRUE;
2603 		dev_spec->shadow_ram[offset+i].value = data[i];
2604 	}
2605 
2606 	nvm->ops.release(hw);
2607 
2608 out:
2609 	return ret_val;
2610 }
2611 
2612 /**
2613  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
2614  *  @hw: pointer to the HW structure
2615  *
2616  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
2617  *  which writes the checksum to the shadow ram.  The changes in the shadow
2618  *  ram are then committed to the EEPROM by processing each bank at a time
2619  *  checking for the modified bit and writing only the pending changes.
2620  *  After a successful commit, the shadow ram is cleared and is ready for
2621  *  future writes.
2622  **/
2623 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2624 {
2625 	struct e1000_nvm_info *nvm = &hw->nvm;
2626 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2627 	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
2628 	s32 ret_val;
2629 	u16 data;
2630 
2631 	DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
2632 
2633 	ret_val = e1000_update_nvm_checksum_generic(hw);
2634 	if (ret_val)
2635 		goto out;
2636 
2637 	if (nvm->type != e1000_nvm_flash_sw)
2638 		goto out;
2639 
2640 	nvm->ops.acquire(hw);
2641 
2642 	/*
2643 	 * We're writing to the opposite bank so if we're on bank 1,
2644 	 * write to bank 0 etc.  We also need to erase the segment that
2645 	 * is going to be written
2646 	 */
2647 	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2648 	if (ret_val != E1000_SUCCESS) {
2649 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
2650 		bank = 0;
2651 	}
2652 
2653 	if (bank == 0) {
2654 		new_bank_offset = nvm->flash_bank_size;
2655 		old_bank_offset = 0;
2656 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
2657 		if (ret_val)
2658 			goto release;
2659 	} else {
2660 		old_bank_offset = nvm->flash_bank_size;
2661 		new_bank_offset = 0;
2662 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
2663 		if (ret_val)
2664 			goto release;
2665 	}
2666 
2667 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2668 		/*
2669 		 * Determine whether to write the value stored
2670 		 * in the other NVM bank or a modified value stored
2671 		 * in the shadow RAM
2672 		 */
2673 		if (dev_spec->shadow_ram[i].modified) {
2674 			data = dev_spec->shadow_ram[i].value;
2675 		} else {
2676 			ret_val = e1000_read_flash_word_ich8lan(hw, i +
2677 			                                        old_bank_offset,
2678 			                                        &data);
2679 			if (ret_val)
2680 				break;
2681 		}
2682 
2683 		/*
2684 		 * If the word is 0x13, then make sure the signature bits
2685 		 * (15:14) are 11b until the commit has completed.
2686 		 * This will allow us to write 10b which indicates the
2687 		 * signature is valid.  We want to do this after the write
2688 		 * has completed so that we don't mark the segment valid
2689 		 * while the write is still in progress
2690 		 */
2691 		if (i == E1000_ICH_NVM_SIG_WORD)
2692 			data |= E1000_ICH_NVM_SIG_MASK;
2693 
2694 		/* Convert offset to bytes. */
2695 		act_offset = (i + new_bank_offset) << 1;
2696 
2697 		usec_delay(100);
2698 		/* Write the bytes to the new bank. */
2699 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2700 		                                               act_offset,
2701 		                                               (u8)data);
2702 		if (ret_val)
2703 			break;
2704 
2705 		usec_delay(100);
2706 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2707 		                                          act_offset + 1,
2708 		                                          (u8)(data >> 8));
2709 		if (ret_val)
2710 			break;
2711 	}
2712 
2713 	/*
2714 	 * Don't bother writing the segment valid bits if sector
2715 	 * programming failed.
2716 	 */
2717 	if (ret_val) {
2718 		DEBUGOUT("Flash commit failed.\n");
2719 		goto release;
2720 	}
2721 
2722 	/*
2723 	 * Finally, validate the new segment by setting bits 15:14
2724 	 * to 10b in word 0x13.  This can be done without an
2725 	 * erase as well, since these bits are 11 to start with
2726 	 * and we only need to change bit 14 to 0b
2727 	 */
2728 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
2729 	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
2730 	if (ret_val)
2731 		goto release;
2732 
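     	/* 0xBFFF clears bit 14, turning the 11b signature bits into 10b */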
2733 	data &= 0xBFFF;
2734 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2735 	                                               act_offset * 2 + 1,
2736 	                                               (u8)(data >> 8));
2737 	if (ret_val)
2738 		goto release;
2739 
2740 	/*
2741 	 * And invalidate the previously valid segment by setting
2742 	 * its signature word (0x13) high_byte to 0b. This can be
2743 	 * done without an erase because flash erase sets all bits
2744 	 * to 1's. We can write 1's to 0's without an erase
2745 	 */
2746 	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
2747 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
2748 	if (ret_val)
2749 		goto release;
2750 
2751 	/* Great!  Everything worked, we can now clear the cached entries. */
2752 	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2753 		dev_spec->shadow_ram[i].modified = FALSE;
2754 		dev_spec->shadow_ram[i].value = 0xFFFF;
2755 	}
2756 
2757 release:
2758 	nvm->ops.release(hw);
2759 
2760 	/*
2761 	 * Reload the EEPROM, or else modifications will not appear
2762 	 * until after the next adapter reset.
2763 	 */
2764 	if (!ret_val) {
2765 		nvm->ops.reload(hw);
2766 		msec_delay(10);
2767 	}
2768 
2769 out:
2770 	if (ret_val)
2771 		DEBUGOUT1("NVM update error: %d\n", ret_val);
2772 
2773 	return ret_val;
2774 }
2775 
2776 /**
2777  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
2778  *  @hw: pointer to the HW structure
2779  *
2780  *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
2781  *  If the bit is 0, that the EEPROM had been modified, but the checksum was not
2782  *  calculated, in which case we need to calculate the checksum and set bit 6.
2783  **/
2784 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
2785 {
2786 	s32 ret_val = E1000_SUCCESS;
2787 	u16 data;
2788 
2789 	DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
2790 
2791 	/*
2792 	 * Read 0x19 and check bit 6.  If this bit is 0, the checksum
2793 	 * needs to be fixed.  This bit is an indication that the NVM
2794 	 * was prepared by OEM software and did not calculate the
2795 	 * checksum...a likely scenario.
2796 	 */
2797 	ret_val = hw->nvm.ops.read(hw, 0x19, 1, &data);
2798 	if (ret_val)
2799 		goto out;
2800 
2801 	if ((data & 0x40) == 0) {
2802 		data |= 0x40;
2803 		ret_val = hw->nvm.ops.write(hw, 0x19, 1, &data);
2804 		if (ret_val)
2805 			goto out;
2806 		ret_val = hw->nvm.ops.update(hw);
2807 		if (ret_val)
2808 			goto out;
2809 	}
2810 
2811 	ret_val = e1000_validate_nvm_checksum_generic(hw);
2812 
2813 out:
2814 	return ret_val;
2815 }
2816 
2817 /**
2818  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
2819  *  @hw: pointer to the HW structure
2820  *  @offset: The offset (in bytes) of the byte/word to write.
2821  *  @size: Size of data to write, 1=byte 2=word
2822  *  @data: The byte(s) to write to the NVM.
2823  *
2824  *  Writes one/two bytes to the NVM using the flash access registers.
2825  **/
2826 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2827                                           u8 size, u16 data)
2828 {
2829 	union ich8_hws_flash_status hsfsts;
2830 	union ich8_hws_flash_ctrl hsflctl;
2831 	u32 flash_linear_addr;
2832 	u32 flash_data = 0;
2833 	s32 ret_val = -E1000_ERR_NVM;
2834 	u8 count = 0;
2835 
2836 	DEBUGFUNC("e1000_write_ich8_data");
2837 
2838 	if (size < 1 || size > 2 || data > size * 0xff ||
2839 	    offset > ICH_FLASH_LINEAR_ADDR_MASK)
2840 		goto out;
2841 
2842 	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2843 	                    hw->nvm.flash_base_addr;
2844 
2845 	do {
2846 		usec_delay(1);
2847 		/* Steps */
2848 		ret_val = e1000_flash_cycle_init_ich8lan(hw);
2849 		if (ret_val != E1000_SUCCESS)
2850 			break;
2851 
2852 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2853 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2854 		hsflctl.hsf_ctrl.fldbcount = size - 1;
2855 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
2856 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2857 
2858 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
2859 
2860 		if (size == 1)
2861 			flash_data = (u32)data & 0x00FF;
2862 		else
2863 			flash_data = (u32)data;
2864 
2865 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
2866 
2867 		/*
2868 		 * Check if FCERR is set to 1; if so, clear it and try
2869 		 * the whole sequence a few more times, else we are done.
2870 		 */
2871 		ret_val = e1000_flash_cycle_ich8lan(hw,
2872 		                               ICH_FLASH_WRITE_COMMAND_TIMEOUT);
2873 		if (ret_val == E1000_SUCCESS)
2874 			break;
2875 
2876 		/*
2877 		 * If we're here, then things are most likely
2878 		 * completely hosed, but if the error condition
2879 		 * is detected, it won't hurt to give it another
2880 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
2881 		 */
2882 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2883 		if (hsfsts.hsf_status.flcerr == 1)
2884 			/* Repeat for some time before giving up. */
2885 			continue;
2886 		if (hsfsts.hsf_status.flcdone == 0) {
2887 			DEBUGOUT("Timeout error - flash cycle "
2888 				 "did not complete.");
2889 			break;
2890 		}
2891 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2892 
2893 out:
2894 	return ret_val;
2895 }
2896 
2897 /**
2898  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
2899  *  @hw: pointer to the HW structure
2900  *  @offset: The index of the byte to write.
2901  *  @data: The byte to write to the NVM.
2902  *
2903  *  Writes a single byte to the NVM using the flash access registers.
2904  **/
2905 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2906                                           u8 data)
2907 {
2908 	u16 word = (u16)data;
2909 
2910 	DEBUGFUNC("e1000_write_flash_byte_ich8lan");
2911 
2912 	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
2913 }
2914 
2915 /**
2916  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
2917  *  @hw: pointer to the HW structure
2918  *  @offset: The offset of the byte to write.
2919  *  @byte: The byte to write to the NVM.
2920  *
2921  *  Writes a single byte to the NVM using the flash access registers.
2922  *  Goes through a retry algorithm before giving up.
2923  **/
2924 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
2925                                                 u32 offset, u8 byte)
2926 {
2927 	s32 ret_val;
2928 	u16 program_retries;
2929 
2930 	DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
2931 
2932 	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2933 	if (ret_val == E1000_SUCCESS)
2934 		goto out;
2935 
2936 	for (program_retries = 0; program_retries < 100; program_retries++) {
2937 		DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
2938 		usec_delay(100);
2939 		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2940 		if (ret_val == E1000_SUCCESS)
2941 			break;
2942 	}
2943 	if (program_retries == 100) {
2944 		ret_val = -E1000_ERR_NVM;
2945 		goto out;
2946 	}
2947 
2948 out:
2949 	return ret_val;
2950 }
2951 
2952 /**
2953  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
2954  *  @hw: pointer to the HW structure
2955  *  @bank: 0 for first bank, 1 for second bank, etc.
2956  *
2957  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
2958  *  bank N is 4096 * N + flash_reg_addr.
2959  **/
2960 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
2961 {
2962 	struct e1000_nvm_info *nvm = &hw->nvm;
2963 	union ich8_hws_flash_status hsfsts;
2964 	union ich8_hws_flash_ctrl hsflctl;
2965 	u32 flash_linear_addr;
2966 	/* bank size is in 16bit words - adjust to bytes */
2967 	u32 flash_bank_size = nvm->flash_bank_size * 2;
2968 	s32 ret_val = E1000_SUCCESS;
2969 	s32 count = 0;
2970 	s32 j, iteration, sector_size;
2971 
2972 	DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
2973 
2974 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2975 
2976 	/*
2977 	 * Determine HW Sector size: Read BERASE bits of hw flash status
2978 	 * register
2979 	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
2980 	 *     consecutive sectors.  The start index for the nth Hw sector
2981 	 *     can be calculated as = bank * 4096 + n * 256
2982 	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
2983 	 *     The start index for the nth Hw sector can be calculated
2984 	 *     as = bank * 4096
2985 	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
2986 	 *     (ich9 only, otherwise error condition)
2987 	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
2988 	 */
2989 	switch (hsfsts.hsf_status.berasesz) {
2990 	case 0:
2991 		/* Hw sector size 256 */
2992 		sector_size = ICH_FLASH_SEG_SIZE_256;
2993 		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
2994 		break;
2995 	case 1:
2996 		sector_size = ICH_FLASH_SEG_SIZE_4K;
2997 		iteration = 1;
2998 		break;
2999 	case 2:
3000 		sector_size = ICH_FLASH_SEG_SIZE_8K;
3001 		iteration = 1;
3002 		break;
3003 	case 3:
3004 		sector_size = ICH_FLASH_SEG_SIZE_64K;
3005 		iteration = 1;
3006 		break;
3007 	default:
3008 		ret_val = -E1000_ERR_NVM;
3009 		goto out;
3010 	}
3011 
3012 	/* Start with the base address, then add the sector offset. */
3013 	flash_linear_addr = hw->nvm.flash_base_addr;
3014 	flash_linear_addr += (bank) ? flash_bank_size : 0;
3015 
3016 	for (j = 0; j < iteration ; j++) {
3017 		do {
3018 			/* Steps */
3019 			ret_val = e1000_flash_cycle_init_ich8lan(hw);
3020 			if (ret_val)
3021 				goto out;
3022 
3023 			/*
3024 			 * Write a value 11 (block Erase) in Flash
3025 			 * Cycle field in hw flash control
3026 			 */
3027 			hsflctl.regval = E1000_READ_FLASH_REG16(hw,
3028 			                                      ICH_FLASH_HSFCTL);
3029 			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3030 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3031 			                        hsflctl.regval);
3032 
3033 			/*
3034 			 * Write the last 24 bits of an index within the
3035 			 * block into Flash Linear address field in Flash
3036 			 * Address.
3037 			 */
3038 			flash_linear_addr += (j * sector_size);
3039 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3040 			                      flash_linear_addr);
3041 
3042 			ret_val = e1000_flash_cycle_ich8lan(hw,
3043 			                       ICH_FLASH_ERASE_COMMAND_TIMEOUT);
3044 			if (ret_val == E1000_SUCCESS)
3045 				break;
3046 
3047 			/*
3048 			 * Check if FCERR is set to 1.  If 1,
3049 			 * clear it and try the whole sequence
3050 			 * a few more times else Done
3051 			 */
3052 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3053 						      ICH_FLASH_HSFSTS);
3054 			if (hsfsts.hsf_status.flcerr == 1)
3055 				/* repeat for some time before giving up */
3056 				continue;
3057 			else if (hsfsts.hsf_status.flcdone == 0)
3058 				goto out;
3059 		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3060 	}
3061 
3062 out:
3063 	return ret_val;
3064 }
3065 
3066 /**
3067  *  e1000_valid_led_default_ich8lan - Set the default LED settings
3068  *  @hw: pointer to the HW structure
3069  *  @data: Pointer to the LED settings
3070  *
3071  *  Reads the LED default settings from the NVM to data.  If the NVM LED
3072  *  setting is all 0's or F's, set the LED default to a valid LED default
3073  *  setting.
3074  **/
3075 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3076 {
3077 	s32 ret_val;
3078 
3079 	DEBUGFUNC("e1000_valid_led_default_ich8lan");
3080 
3081 	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3082 	if (ret_val) {
3083 		DEBUGOUT("NVM Read Error\n");
3084 		goto out;
3085 	}
3086 
3087 	if (*data == ID_LED_RESERVED_0000 ||
3088 	    *data == ID_LED_RESERVED_FFFF)
3089 		*data = ID_LED_DEFAULT_ICH8LAN;
3090 
3091 out:
3092 	return ret_val;
3093 }
3094 
3095 /**
3096  *  e1000_id_led_init_pchlan - store LED configurations
3097  *  @hw: pointer to the HW structure
3098  *
3099  *  PCH does not control LEDs via the LEDCTL register, rather it uses
3100  *  the PHY LED configuration register.
3101  *
3102  *  PCH also does not have an "always on" or "always off" mode which
3103  *  complicates the ID feature.  Instead of using the "on" mode to indicate
3104  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
3105  *  use "link_up" mode.  The LEDs will still ID on request if there is no
3106  *  link based on logic in e1000_led_[on|off]_pchlan().
3107  **/
3108 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
3109 {
3110 	struct e1000_mac_info *mac = &hw->mac;
3111 	s32 ret_val;
3112 	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
3113 	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
3114 	u16 data, i, temp, shift;
3115 
3116 	DEBUGFUNC("e1000_id_led_init_pchlan");
3117 
3118 	/* Get default ID LED modes */
3119 	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
3120 	if (ret_val)
3121 		goto out;
3122 
3123 	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
3124 	mac->ledctl_mode1 = mac->ledctl_default;
3125 	mac->ledctl_mode2 = mac->ledctl_default;
3126 
3127 	for (i = 0; i < 4; i++) {
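     		/*
     		 * Four bits of the NVM word select the mode for each LED;
     		 * the PHY LED configuration fields are spaced 5 bits apart,
     		 * hence the separate shift below.
     		 */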
3128 		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
3129 		shift = (i * 5);
3130 		switch (temp) {
3131 		case ID_LED_ON1_DEF2:
3132 		case ID_LED_ON1_ON2:
3133 		case ID_LED_ON1_OFF2:
3134 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3135 			mac->ledctl_mode1 |= (ledctl_on << shift);
3136 			break;
3137 		case ID_LED_OFF1_DEF2:
3138 		case ID_LED_OFF1_ON2:
3139 		case ID_LED_OFF1_OFF2:
3140 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3141 			mac->ledctl_mode1 |= (ledctl_off << shift);
3142 			break;
3143 		default:
3144 			/* Do nothing */
3145 			break;
3146 		}
3147 		switch (temp) {
3148 		case ID_LED_DEF1_ON2:
3149 		case ID_LED_ON1_ON2:
3150 		case ID_LED_OFF1_ON2:
3151 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3152 			mac->ledctl_mode2 |= (ledctl_on << shift);
3153 			break;
3154 		case ID_LED_DEF1_OFF2:
3155 		case ID_LED_ON1_OFF2:
3156 		case ID_LED_OFF1_OFF2:
3157 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3158 			mac->ledctl_mode2 |= (ledctl_off << shift);
3159 			break;
3160 		default:
3161 			/* Do nothing */
3162 			break;
3163 		}
3164 	}
3165 
3166 out:
3167 	return ret_val;
3168 }
3169 
3170 /**
3171  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
3172  *  @hw: pointer to the HW structure
3173  *
3174  *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
3175  *  register, so the bus width is hard coded.
3176  **/
3177 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3178 {
3179 	struct e1000_bus_info *bus = &hw->bus;
3180 	s32 ret_val;
3181 
3182 	DEBUGFUNC("e1000_get_bus_info_ich8lan");
3183 
3184 	ret_val = e1000_get_bus_info_pcie_generic(hw);
3185 
3186 	/*
3187 	 * ICH devices are "PCI Express"-ish.  They have
3188 	 * a configuration space, but do not contain
3189 	 * PCI Express Capability registers, so bus width
3190 	 * must be hardcoded.
3191 	 */
3192 	if (bus->width == e1000_bus_width_unknown)
3193 		bus->width = e1000_bus_width_pcie_x1;
3194 
3195 	return ret_val;
3196 }
3197 
3198 /**
3199  *  e1000_reset_hw_ich8lan - Reset the hardware
3200  *  @hw: pointer to the HW structure
3201  *
3202  *  Does a full reset of the hardware which includes a reset of the PHY and
3203  *  MAC.
3204  **/
3205 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3206 {
3207 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3208 	u16 reg;
3209 	u32 ctrl, kab;
3210 	s32 ret_val;
3211 
3212 	DEBUGFUNC("e1000_reset_hw_ich8lan");
3213 
3214 	/*
3215 	 * Prevent the PCI-E bus from sticking if there is no TLP connection
3216 	 * on the last TLP read/write transaction when MAC is reset.
3217 	 */
3218 	ret_val = e1000_disable_pcie_master_generic(hw);
3219 	if (ret_val)
3220 		DEBUGOUT("PCI-E Master disable polling has failed.\n");
3221 
3222 	DEBUGOUT("Masking off all interrupts\n");
3223 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3224 
3225 	/*
3226 	 * Disable the Transmit and Receive units.  Then delay to allow
3227 	 * any pending transactions to complete before we hit the MAC
3228 	 * with the global reset.
3229 	 */
3230 	E1000_WRITE_REG(hw, E1000_RCTL, 0);
3231 	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
3232 	E1000_WRITE_FLUSH(hw);
3233 
3234 	msec_delay(10);
3235 
3236 	/* Workaround for ICH8 bit corruption issue in FIFO memory */
3237 	if (hw->mac.type == e1000_ich8lan) {
3238 		/* Set Tx and Rx buffer allocation to 8k apiece. */
3239 		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
3240 		/* Set Packet Buffer Size to 16k. */
3241 		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
3242 	}
3243 
3244 	if (hw->mac.type == e1000_pchlan) {
3245 		/* Save the NVM K1 bit setting */
3246 		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg);
3247 		if (ret_val)
3248 			return ret_val;
3249 
3250 		if (reg & E1000_NVM_K1_ENABLE)
3251 			dev_spec->nvm_k1_enabled = TRUE;
3252 		else
3253 			dev_spec->nvm_k1_enabled = FALSE;
3254 	}
3255 
3256 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
3257 
3258 	if (!hw->phy.ops.check_reset_block(hw)) {
3259 		/*
3260 		 * Full-chip reset requires MAC and PHY reset at the same
3261 		 * time to make sure the interface between MAC and the
3262 		 * external PHY is reset.
3263 		 */
3264 		ctrl |= E1000_CTRL_PHY_RST;
3265 
3266 		/*
3267 		 * Gate automatic PHY configuration by hardware on
3268 		 * non-managed 82579
3269 		 */
3270 		if ((hw->mac.type == e1000_pch2lan) &&
3271 		    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3272 			e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3273 	}
3274 	ret_val = e1000_acquire_swflag_ich8lan(hw);
3275 	DEBUGOUT("Issuing a global reset to ich8lan\n");
3276 	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
3277 	msec_delay(20);
3278 
3279 	if (ctrl & E1000_CTRL_PHY_RST) {
3280 		ret_val = hw->phy.ops.get_cfg_done(hw);
3281 		if (ret_val)
3282 			goto out;
3283 
3284 		ret_val = e1000_post_phy_reset_ich8lan(hw);
3285 		if (ret_val)
3286 			goto out;
3287 	}
3288 
3289 	/*
3290 	 * For PCH, this write will make sure that any noise
3291 	 * will be detected as a CRC error and be dropped rather than show up
3292 	 * as a bad packet to the DMA engine.
3293 	 */
3294 	if (hw->mac.type == e1000_pchlan)
3295 		E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
3296 
3297 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3298 	E1000_READ_REG(hw, E1000_ICR);
3299 
3300 	kab = E1000_READ_REG(hw, E1000_KABGTXD);
3301 	kab |= E1000_KABGTXD_BGSQLBIAS;
3302 	E1000_WRITE_REG(hw, E1000_KABGTXD, kab);
3303 
3304 out:
3305 	return ret_val;
3306 }
3307 
3308 /**
3309  *  e1000_init_hw_ich8lan - Initialize the hardware
3310  *  @hw: pointer to the HW structure
3311  *
3312  *  Prepares the hardware for transmit and receive by doing the following:
3313  *   - initialize hardware bits
3314  *   - initialize LED identification
3315  *   - setup receive address registers
3316  *   - setup flow control
3317  *   - setup transmit descriptors
3318  *   - clear statistics
3319  **/
3320 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3321 {
3322 	struct e1000_mac_info *mac = &hw->mac;
3323 	u32 ctrl_ext, txdctl, snoop;
3324 	s32 ret_val;
3325 	u16 i;
3326 
3327 	DEBUGFUNC("e1000_init_hw_ich8lan");
3328 
3329 	e1000_initialize_hw_bits_ich8lan(hw);
3330 
3331 	/* Initialize identification LED */
3332 	ret_val = mac->ops.id_led_init(hw);
3333 	if (ret_val)
3334 		DEBUGOUT("Error initializing identification LED\n");
3335 		/* This is not fatal and we should not stop init due to this */
3336 
3337 	/* Setup the receive address. */
3338 	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
3339 
3340 	/* Zero out the Multicast HASH table */
3341 	DEBUGOUT("Zeroing the MTA\n");
3342 	for (i = 0; i < mac->mta_reg_count; i++)
3343 		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
3344 
3345 	/*
3346 	 * The 82578 Rx buffer will stall if wakeup is enabled in host and
3347 	 * the ME.  Disable wakeup by clearing the host wakeup bit.
3348 	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
3349 	 */
3350 	if (hw->phy.type == e1000_phy_82578) {
3351 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
3352 		i &= ~BM_WUC_HOST_WU_BIT;
3353 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
3354 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
3355 		if (ret_val)
3356 			return ret_val;
3357 	}
3358 
3359 	/* Setup link and flow control */
3360 	ret_val = mac->ops.setup_link(hw);
3361 
3362 	/* Set the transmit descriptor write-back policy for both queues */
3363 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
3364 	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3365 		 E1000_TXDCTL_FULL_TX_DESC_WB;
3366 	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3367 	         E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3368 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
3369 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
3370 	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3371 		 E1000_TXDCTL_FULL_TX_DESC_WB;
3372 	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3373 	         E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3374 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
3375 
3376 	/*
3377 	 * ICH8 has opposite polarity of no_snoop bits.
3378 	 * By default, we should use snoop behavior.
3379 	 */
3380 	if (mac->type == e1000_ich8lan)
3381 		snoop = PCIE_ICH8_SNOOP_ALL;
3382 	else
3383 		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
3384 	e1000_set_pcie_no_snoop_generic(hw, snoop);
3385 
3386 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
3387 	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
3388 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
3389 
3390 	/*
3391 	 * Clear all of the statistics registers (clear on read).  It is
3392 	 * important that we do this after we have tried to establish link
3393 	 * because the symbol error count will increment wildly if there
3394 	 * is no link.
3395 	 */
3396 	e1000_clear_hw_cntrs_ich8lan(hw);
3397 
3398 	return ret_val;
3399 }
3400 /**
3401  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
3402  *  @hw: pointer to the HW structure
3403  *
3404  *  Sets/Clears the hardware bits required for correctly setting up the
3405  *  hardware for transmit and receive.
3406  **/
3407 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3408 {
3409 	u32 reg;
3410 
3411 	DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
3412 
3413 	/* Extended Device Control */
3414 	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
3415 	reg |= (1 << 22);
3416 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
3417 	if (hw->mac.type >= e1000_pchlan)
3418 		reg |= E1000_CTRL_EXT_PHYPDEN;
3419 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
3420 
3421 	/* Transmit Descriptor Control 0 */
3422 	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
3423 	reg |= (1 << 22);
3424 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
3425 
3426 	/* Transmit Descriptor Control 1 */
3427 	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
3428 	reg |= (1 << 22);
3429 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
3430 
3431 	/* Transmit Arbitration Control 0 */
3432 	reg = E1000_READ_REG(hw, E1000_TARC(0));
3433 	if (hw->mac.type == e1000_ich8lan)
3434 		reg |= (1 << 28) | (1 << 29);
3435 	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
3436 	E1000_WRITE_REG(hw, E1000_TARC(0), reg);
3437 
3438 	/* Transmit Arbitration Control 1 */
3439 	reg = E1000_READ_REG(hw, E1000_TARC(1));
3440 	if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
3441 		reg &= ~(1 << 28);
3442 	else
3443 		reg |= (1 << 28);
3444 	reg |= (1 << 24) | (1 << 26) | (1 << 30);
3445 	E1000_WRITE_REG(hw, E1000_TARC(1), reg);
3446 
3447 	/* Device Status */
3448 	if (hw->mac.type == e1000_ich8lan) {
3449 		reg = E1000_READ_REG(hw, E1000_STATUS);
3450 		reg &= ~(1 << 31);
3451 		E1000_WRITE_REG(hw, E1000_STATUS, reg);
3452 	}
3453 
3454 	/*
3455 	 * Work around a descriptor data corruption issue seen with NFSv2 UDP
3456 	 * traffic by simply disabling the NFS filtering capability.
3457 	 */
3458 	reg = E1000_READ_REG(hw, E1000_RFCTL);
3459 	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
3460 	E1000_WRITE_REG(hw, E1000_RFCTL, reg);
3461 
3462 	return;
3463 }
3464 
3465 /**
3466  *  e1000_setup_link_ich8lan - Setup flow control and link settings
3467  *  @hw: pointer to the HW structure
3468  *
3469  *  Determines which flow control settings to use, then configures flow
3470  *  control.  Calls the appropriate media-specific link configuration
3471  *  function.  Assuming the adapter has a valid link partner, a valid link
3472  *  should be established.  Assumes the hardware has previously been reset
3473  *  and the transmitter and receiver are not enabled.
3474  **/
3475 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3476 {
3477 	s32 ret_val = E1000_SUCCESS;
3478 
3479 	DEBUGFUNC("e1000_setup_link_ich8lan");
3480 
3481 	if (hw->phy.ops.check_reset_block(hw))
3482 		goto out;
3483 
3484 	/*
3485 	 * ICH parts do not have a word in the NVM to determine
3486 	 * the default flow control setting, so we explicitly
3487 	 * set it to full.
3488 	 */
3489 	if (hw->fc.requested_mode == e1000_fc_default)
3490 		hw->fc.requested_mode = e1000_fc_full;
3491 
3492 	/*
3493 	 * Save off the requested flow control mode for use later.  Depending
3494 	 * on the link partner's capabilities, we may or may not use this mode.
3495 	 */
3496 	hw->fc.current_mode = hw->fc.requested_mode;
3497 
3498 	DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
3499 		hw->fc.current_mode);
3500 
3501 	/* Continue to configure the copper link. */
3502 	ret_val = hw->mac.ops.setup_physical_interface(hw);
3503 	if (ret_val)
3504 		goto out;
3505 
3506 	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
3507 	if ((hw->phy.type == e1000_phy_82578) ||
3508 	    (hw->phy.type == e1000_phy_82579) ||
3509 	    (hw->phy.type == e1000_phy_82577)) {
3510 		E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
3511 
3512 		ret_val = hw->phy.ops.write_reg(hw,
3513 		                             PHY_REG(BM_PORT_CTRL_PAGE, 27),
3514 		                             hw->fc.pause_time);
3515 		if (ret_val)
3516 			goto out;
3517 	}
3518 
3519 	ret_val = e1000_set_fc_watermarks_generic(hw);
3520 
3521 out:
3522 	return ret_val;
3523 }
3524 
3525 /**
3526  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
3527  *  @hw: pointer to the HW structure
3528  *
3529  *  Configures the Kumeran interface to the PHY to wait the appropriate time
3530  *  when polling the PHY, then calls the generic setup_copper_link to finish
3531  *  configuring the copper link.
3532  **/
3533 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3534 {
3535 	u32 ctrl;
3536 	s32 ret_val;
3537 	u16 reg_data;
3538 
3539 	DEBUGFUNC("e1000_setup_copper_link_ich8lan");
3540 
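	/*
	 * Set the link up and clear the force speed/duplex bits so that
	 * speed and duplex are not forced by the MAC.
	 */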
3541 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
3542 	ctrl |= E1000_CTRL_SLU;
3543 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
3544 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3545 
3546 	/*
3547 	 * Set the mac to wait the maximum time between each iteration
3548 	 * and increase the max iterations when polling the phy;
3549 	 * this fixes erroneous timeouts at 10Mbps.
3550 	 */
3551 	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
3552 	                                       0xFFFF);
3553 	if (ret_val)
3554 		goto out;
3555 	ret_val = e1000_read_kmrn_reg_generic(hw,
3556 	                                      E1000_KMRNCTRLSTA_INBAND_PARAM,
3557 	                                      &reg_data);
3558 	if (ret_val)
3559 		goto out;
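	/*
	 * Raise the low bits of the in-band parameter to their maximum;
	 * per the comment above, this increases the PHY polling iterations.
	 */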
3560 	reg_data |= 0x3F;
3561 	ret_val = e1000_write_kmrn_reg_generic(hw,
3562 	                                       E1000_KMRNCTRLSTA_INBAND_PARAM,
3563 	                                       reg_data);
3564 	if (ret_val)
3565 		goto out;
3566 
3567 	switch (hw->phy.type) {
3568 	case e1000_phy_igp_3:
3569 		ret_val = e1000_copper_link_setup_igp(hw);
3570 		if (ret_val)
3571 			goto out;
3572 		break;
3573 	case e1000_phy_bm:
3574 	case e1000_phy_82578:
3575 		ret_val = e1000_copper_link_setup_m88(hw);
3576 		if (ret_val)
3577 			goto out;
3578 		break;
3579 	case e1000_phy_82577:
3580 	case e1000_phy_82579:
3581 		ret_val = e1000_copper_link_setup_82577(hw);
3582 		if (ret_val)
3583 			goto out;
3584 		break;
3585 	case e1000_phy_ife:
3586 		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
3587 		                               &reg_data);
3588 		if (ret_val)
3589 			goto out;
3590 
3591 		reg_data &= ~IFE_PMC_AUTO_MDIX;
3592 
3593 		switch (hw->phy.mdix) {
3594 		case 1:
3595 			reg_data &= ~IFE_PMC_FORCE_MDIX;
3596 			break;
3597 		case 2:
3598 			reg_data |= IFE_PMC_FORCE_MDIX;
3599 			break;
3600 		case 0:
3601 		default:
3602 			reg_data |= IFE_PMC_AUTO_MDIX;
3603 			break;
3604 		}
3605 		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
3606 		                                reg_data);
3607 		if (ret_val)
3608 			goto out;
3609 		break;
3610 	default:
3611 		break;
3612 	}
3613 	ret_val = e1000_setup_copper_link_generic(hw);
3614 
3615 out:
3616 	return ret_val;
3617 }
3618 
3619 /**
3620  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
3621  *  @hw: pointer to the HW structure
3622  *  @speed: pointer to store current link speed
3623  *  @duplex: pointer to store the current link duplex
3624  *
3625  *  Calls the generic get_speed_and_duplex to retrieve the current link
3626  *  information and then calls the Kumeran lock loss workaround for links at
3627  *  gigabit speeds.
3628  **/
3629 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
3630                                           u16 *duplex)
3631 {
3632 	s32 ret_val;
3633 
3634 	DEBUGFUNC("e1000_get_link_up_info_ich8lan");
3635 
3636 	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
3637 	if (ret_val)
3638 		goto out;
3639 
3640 	if ((hw->mac.type == e1000_ich8lan) &&
3641 	    (hw->phy.type == e1000_phy_igp_3) &&
3642 	    (*speed == SPEED_1000)) {
3643 		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
3644 	}
3645 
3646 out:
3647 	return ret_val;
3648 }
3649 
3650 /**
3651  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
3652  *  @hw: pointer to the HW structure
3653  *
3654  *  Work-around for 82566 Kumeran PCS lock loss:
3655  *  On link status change (i.e. PCI reset, speed change), when link is up and
3656  *  the speed is gigabit:
3657  *    0) if workaround is optionally disabled do nothing
3658  *    1) wait 1ms for Kumeran link to come up
3659  *    2) check Kumeran Diagnostic register PCS lock loss bit
3660  *    3) if not set the link is locked (all is good), otherwise...
3661  *    4) reset the PHY
3662  *    5) repeat up to 10 times
3663  *  Note: this is only called for IGP3 copper when speed is 1gb.
3664  **/
3665 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
3666 {
3667 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3668 	u32 phy_ctrl;
3669 	s32 ret_val = E1000_SUCCESS;
3670 	u16 i, data;
3671 	bool link;
3672 
3673 	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
3674 
3675 	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
3676 		goto out;
3677 
3678 	/*
3679 	 * Make sure link is up before proceeding.  If not, just return.
3680 	 * Attempting this while the link is negotiating has been seen to foul
3681 	 * up link stability.
3682 	 */
3683 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
3684 	if (!link) {
3685 		ret_val = E1000_SUCCESS;
3686 		goto out;
3687 	}
3688 
3689 	for (i = 0; i < 10; i++) {
3690 		/* read once to clear */
3691 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3692 		if (ret_val)
3693 			goto out;
3694 		/* and again to get new status */
3695 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3696 		if (ret_val)
3697 			goto out;
3698 
3699 		/* check for PCS lock */
3700 		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) {
3701 			ret_val = E1000_SUCCESS;
3702 			goto out;
3703 		}
3704 
3705 		/* Issue PHY reset */
3706 		hw->phy.ops.reset(hw);
3707 		msec_delay_irq(5);
3708 	}
3709 	/* Disable GigE link negotiation */
3710 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3711 	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
3712 	             E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3713 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3714 
3715 	/*
3716 	 * Call gig speed drop workaround on Gig disable before accessing
3717 	 * any PHY registers
3718 	 */
3719 	e1000_gig_downshift_workaround_ich8lan(hw);
3720 
3721 	/* unable to acquire PCS lock */
3722 	ret_val = -E1000_ERR_PHY;
3723 
3724 out:
3725 	return ret_val;
3726 }
3727 
3728 /**
3729  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
3730  *  @hw: pointer to the HW structure
3731  *  @state: boolean value used to set the current Kumeran workaround state
3732  *
3733  *  If ICH8, set the current Kumeran workaround state (TRUE = enabled,
3734  *  FALSE = disabled).
3735  **/
3736 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
3737                                                  bool state)
3738 {
3739 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3740 
3741 	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
3742 
3743 	if (hw->mac.type != e1000_ich8lan) {
3744 		DEBUGOUT("Workaround applies to ICH8 only.\n");
3745 		return;
3746 	}
3747 
3748 	dev_spec->kmrn_lock_loss_workaround_enabled = state;
3749 
3750 	return;
3751 }
3752 
3753 /**
3754  *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
3755  *  @hw: pointer to the HW structure
3756  *
3757  *  Workaround for 82566 power-down on D3 entry:
3758  *    1) disable gigabit link
3759  *    2) write VR power-down enable
3760  *    3) read it back
3761  *  Continue if successful, else issue LCD reset and repeat
3762  **/
3763 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
3764 {
3765 	u32 reg;
3766 	u16 data;
3767 	u8  retry = 0;
3768 
3769 	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
3770 
3771 	if (hw->phy.type != e1000_phy_igp_3)
3772 		goto out;
3773 
3774 	/* Try the workaround twice (if needed) */
3775 	do {
3776 		/* Disable link */
3777 		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
3778 		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
3779 		        E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3780 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
3781 
3782 		/*
3783 		 * Call gig speed drop workaround on Gig disable before
3784 		 * accessing any PHY registers
3785 		 */
3786 		if (hw->mac.type == e1000_ich8lan)
3787 			e1000_gig_downshift_workaround_ich8lan(hw);
3788 
3789 		/* Write VR power-down enable */
3790 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3791 		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3792 		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
3793 		                   data | IGP3_VR_CTRL_MODE_SHUTDOWN);
3794 
3795 		/* Read it back and test */
3796 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3797 		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3798 		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
3799 			break;
3800 
3801 		/* Issue PHY reset and repeat at most one more time */
3802 		reg = E1000_READ_REG(hw, E1000_CTRL);
3803 		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
3804 		retry++;
3805 	} while (retry);
3806 
3807 out:
3808 	return;
3809 }
3810 
3811 /**
3812  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
3813  *  @hw: pointer to the HW structure
3814  *
3815  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
3816  *  LPLU, Gig disable, MDIC PHY reset):
3817  *    1) Set Kumeran Near-end loopback
3818  *    2) Clear Kumeran Near-end loopback
3819  *  Should only be called for ICH8[m] devices with IGP_3 Phy.
3820  **/
3821 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3822 {
3823 	s32 ret_val = E1000_SUCCESS;
3824 	u16 reg_data;
3825 
3826 	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
3827 
3828 	if ((hw->mac.type != e1000_ich8lan) ||
3829 	    (hw->phy.type != e1000_phy_igp_3))
3830 		goto out;
3831 
3832 	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
3833 	                                      &reg_data);
3834 	if (ret_val)
3835 		goto out;
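	/* Set, then clear, Kumeran near-end loopback (steps 1 and 2 above) */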
3836 	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
3837 	ret_val = e1000_write_kmrn_reg_generic(hw,
3838 	                                       E1000_KMRNCTRLSTA_DIAG_OFFSET,
3839 	                                       reg_data);
3840 	if (ret_val)
3841 		goto out;
3842 	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
3843 	ret_val = e1000_write_kmrn_reg_generic(hw,
3844 	                                       E1000_KMRNCTRLSTA_DIAG_OFFSET,
3845 	                                       reg_data);
3846 out:
3847 	return;
3848 }
3849 
3850 /**
3851  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
3852  *  @hw: pointer to the HW structure
3853  *
3854  *  During S0 to Sx transition, it is possible the link remains at gig
3855  *  instead of negotiating to a lower speed.  Before going to Sx, set
3856  *  'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
3857  *  to a lower speed.  For PCH and newer parts, the OEM bits PHY register
3858  *  (LED, GbE disable and LPLU configurations) also needs to be written.
3859  **/
3860 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
3861 {
3862 	u32 phy_ctrl;
3863 	s32 ret_val;
3864 
3865 	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
3866 
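	/*
	 * Enable D0a LPLU and disable gigabit in PHY_CTRL so the link
	 * negotiates down to a lower speed before entering Sx.
	 */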
3867 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3868 	phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
3869 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3870 
3871 	if (hw->mac.type >= e1000_pchlan) {
3872 		e1000_oem_bits_config_ich8lan(hw, FALSE);
3873 		ret_val = hw->phy.ops.acquire(hw);
3874 		if (ret_val)
3875 			return;
3876 		e1000_write_smbus_addr(hw);
3877 		hw->phy.ops.release(hw);
3878 	}
3879 
3880 	return;
3881 }
3882 
3883 /**
3884  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
3885  *  @hw: pointer to the HW structure
3886  *
3887  *  During Sx to S0 transitions on non-managed devices or managed devices
3888  *  on which PHY resets are not blocked, if the PHY registers cannot be
3889  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
3890  *  the PHY.
3891  **/
3892 void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
3893 {
3894 	u32 fwsm;
3895 
3896 	DEBUGFUNC("e1000_resume_workarounds_pchlan");
3897 
3898 	if (hw->mac.type != e1000_pch2lan)
3899 		return;
3900 
3901 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
3902 	if (!(fwsm & E1000_ICH_FWSM_FW_VALID) ||
3903 	    !hw->phy.ops.check_reset_block(hw)) {
3904 		u16 phy_id1, phy_id2;
3905 		s32 ret_val;
3906 
3907 		ret_val = hw->phy.ops.acquire(hw);
3908 		if (ret_val) {
3909 			DEBUGOUT("Failed to acquire PHY semaphore in resume\n");
3910 			return;
3911 		}
3912 
3913 		/* Test access to the PHY registers by reading the ID regs */
3914 		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1);
3915 		if (ret_val)
3916 			goto release;
3917 		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2);
3918 		if (ret_val)
3919 			goto release;
3920 
3921 		if (hw->phy.id == ((u32)(phy_id1 << 16) |
3922 				   (u32)(phy_id2 & PHY_REVISION_MASK)))
3923 			goto release;
3924 
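		/* The PHY did not respond with its expected ID; power cycle it */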
3925 		e1000_toggle_lanphypc_value_ich8lan(hw);
3926 
3927 		hw->phy.ops.release(hw);
3928 		msec_delay(50);
3929 		hw->phy.ops.reset(hw);
3930 		msec_delay(50);
3931 		return;
3932 	}
3933 
3934 release:
3935 	hw->phy.ops.release(hw);
3936 
3937 	return;
3938 }
3939 
3940 /**
3941  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
3942  *  @hw: pointer to the HW structure
3943  *
3944  *  Return the LED back to the default configuration.
3945  **/
3946 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
3947 {
3948 	DEBUGFUNC("e1000_cleanup_led_ich8lan");
3949 
3950 	if (hw->phy.type == e1000_phy_ife)
3951 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3952 		                             0);
3953 
3954 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
3955 	return E1000_SUCCESS;
3956 }
3957 
3958 /**
3959  *  e1000_led_on_ich8lan - Turn LEDs on
3960  *  @hw: pointer to the HW structure
3961  *
3962  *  Turn on the LEDs.
3963  **/
3964 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
3965 {
3966 	DEBUGFUNC("e1000_led_on_ich8lan");
3967 
3968 	if (hw->phy.type == e1000_phy_ife)
3969 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3970 		                (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
3971 
3972 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
3973 	return E1000_SUCCESS;
3974 }
3975 
3976 /**
3977  *  e1000_led_off_ich8lan - Turn LEDs off
3978  *  @hw: pointer to the HW structure
3979  *
3980  *  Turn off the LEDs.
3981  **/
3982 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
3983 {
3984 	DEBUGFUNC("e1000_led_off_ich8lan");
3985 
3986 	if (hw->phy.type == e1000_phy_ife)
3987 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3988 		               (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
3989 
3990 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
3991 	return E1000_SUCCESS;
3992 }
3993 
3994 /**
3995  *  e1000_setup_led_pchlan - Configures SW controllable LED
3996  *  @hw: pointer to the HW structure
3997  *
3998  *  This prepares the SW controllable LED for use.
3999  **/
4000 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
4001 {
4002 	DEBUGFUNC("e1000_setup_led_pchlan");
4003 
4004 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
4005 					(u16)hw->mac.ledctl_mode1);
4006 }
4007 
4008 /**
4009  *  e1000_cleanup_led_pchlan - Restore the default LED operation
4010  *  @hw: pointer to the HW structure
4011  *
4012  *  Return the LED back to the default configuration.
4013  **/
4014 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
4015 {
4016 	DEBUGFUNC("e1000_cleanup_led_pchlan");
4017 
4018 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
4019 					(u16)hw->mac.ledctl_default);
4020 }
4021 
4022 /**
4023  *  e1000_led_on_pchlan - Turn LEDs on
4024  *  @hw: pointer to the HW structure
4025  *
4026  *  Turn on the LEDs.
4027  **/
4028 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
4029 {
4030 	u16 data = (u16)hw->mac.ledctl_mode2;
4031 	u32 i, led;
4032 
4033 	DEBUGFUNC("e1000_led_on_pchlan");
4034 
4035 	/*
4036 	 * If no link, then turn LED on by setting the invert bit
4037 	 * for each LED whose mode is "link_up" in ledctl_mode2.
4038 	 */
4039 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
4040 		for (i = 0; i < 3; i++) {
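			/* Each LED occupies a 5-bit field in the LED config value */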
4041 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4042 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
4043 			    E1000_LEDCTL_MODE_LINK_UP)
4044 				continue;
4045 			if (led & E1000_PHY_LED0_IVRT)
4046 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4047 			else
4048 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
4049 		}
4050 	}
4051 
4052 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4053 }
4054 
4055 /**
4056  *  e1000_led_off_pchlan - Turn LEDs off
4057  *  @hw: pointer to the HW structure
4058  *
4059  *  Turn off the LEDs.
4060  **/
4061 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
4062 {
4063 	u16 data = (u16)hw->mac.ledctl_mode1;
4064 	u32 i, led;
4065 
4066 	DEBUGFUNC("e1000_led_off_pchlan");
4067 
4068 	/*
4069 	 * If no link, then turn LED off by clearing the invert bit
4070 	 * for each LED whose mode is "link_up" in ledctl_mode1.
4071 	 */
4072 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
4073 		for (i = 0; i < 3; i++) {
4074 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4075 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
4076 			    E1000_LEDCTL_MODE_LINK_UP)
4077 				continue;
4078 			if (led & E1000_PHY_LED0_IVRT)
4079 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4080 			else
4081 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
4082 		}
4083 	}
4084 
4085 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4086 }
4087 
4088 /**
4089  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
4090  *  @hw: pointer to the HW structure
4091  *
4092  *  Read appropriate register for the config done bit for completion status
4093  *  and configure the PHY through s/w for EEPROM-less parts.
4094  *
4095  *  NOTE: some EEPROM-less silicon will fail when trying to read the
4096  *  config done bit, so the error is only logged and init continues.  If we
4097  *  were to return with an error, EEPROM-less silicon would not be able to be reset
4098  *  or change link.
4099  **/
4100 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
4101 {
4102 	s32 ret_val = E1000_SUCCESS;
4103 	u32 bank = 0;
4104 	u32 status;
4105 
4106 	DEBUGFUNC("e1000_get_cfg_done_ich8lan");
4107 
4108 	e1000_get_cfg_done_generic(hw);
4109 
4110 	/* Wait for indication from h/w that it has completed basic config */
4111 	if (hw->mac.type >= e1000_ich10lan) {
4112 		e1000_lan_init_done_ich8lan(hw);
4113 	} else {
4114 		ret_val = e1000_get_auto_rd_done_generic(hw);
4115 		if (ret_val) {
4116 			/*
4117 			 * When auto config read does not complete, do not
4118 			 * return with an error. This can happen in situations
4119 			 * where there is no EEPROM, and returning an error would prevent link.
4120 			 */
4121 			DEBUGOUT("Auto Read Done did not complete\n");
4122 			ret_val = E1000_SUCCESS;
4123 		}
4124 	}
4125 
4126 	/* Clear PHY Reset Asserted bit */
4127 	status = E1000_READ_REG(hw, E1000_STATUS);
4128 	if (status & E1000_STATUS_PHYRA)
4129 		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
4130 	else
4131 		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
4132 
4133 	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
4134 	if (hw->mac.type <= e1000_ich9lan) {
4135 		if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
4136 		    (hw->phy.type == e1000_phy_igp_3)) {
4137 			e1000_phy_init_script_igp3(hw);
4138 		}
4139 	} else {
4140 		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
4141 			/* Maybe we should do a basic PHY config */
4142 			DEBUGOUT("EEPROM not present\n");
4143 			ret_val = -E1000_ERR_CONFIG;
4144 		}
4145 	}
4146 
4147 	return ret_val;
4148 }
4149 
4150 /**
4151  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
4152  * @hw: pointer to the HW structure
4153  *
4154  * In the case of a PHY power down to save power, to turn off link during a
4155  * driver unload, or when wake on LAN is not enabled, remove the link.
4156  **/
4157 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
4158 {
4159 	/* If management is not enabled and PHY resets are not blocked, power down */
4160 	if (!(hw->mac.ops.check_mng_mode(hw) ||
4161 	      hw->phy.ops.check_reset_block(hw)))
4162 		e1000_power_down_phy_copper(hw);
4163 
4164 	return;
4165 }
4166 
4167 /**
4168  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
4169  *  @hw: pointer to the HW structure
4170  *
4171  *  Clears hardware counters specific to the silicon family and calls
4172  *  clear_hw_cntrs_generic to clear all general purpose counters.
4173  **/
4174 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
4175 {
4176 	u16 phy_data;
4177 	s32 ret_val;
4178 
4179 	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
4180 
4181 	e1000_clear_hw_cntrs_base_generic(hw);
4182 
4183 	E1000_READ_REG(hw, E1000_ALGNERRC);
4184 	E1000_READ_REG(hw, E1000_RXERRC);
4185 	E1000_READ_REG(hw, E1000_TNCRS);
4186 	E1000_READ_REG(hw, E1000_CEXTERR);
4187 	E1000_READ_REG(hw, E1000_TSCTC);
4188 	E1000_READ_REG(hw, E1000_TSCTFC);
4189 
4190 	E1000_READ_REG(hw, E1000_MGTPRC);
4191 	E1000_READ_REG(hw, E1000_MGTPDC);
4192 	E1000_READ_REG(hw, E1000_MGTPTC);
4193 
4194 	E1000_READ_REG(hw, E1000_IAC);
4195 	E1000_READ_REG(hw, E1000_ICRXOC);
4196 
4197 	/* Clear PHY statistics registers */
4198 	if ((hw->phy.type == e1000_phy_82578) ||
4199 	    (hw->phy.type == e1000_phy_82579) ||
4200 	    (hw->phy.type == e1000_phy_82577)) {
4201 		ret_val = hw->phy.ops.acquire(hw);
4202 		if (ret_val)
4203 			return;
4204 		ret_val = hw->phy.ops.set_page(hw,
4205 		                               HV_STATS_PAGE << IGP_PAGE_SHIFT);
4206 		if (ret_val)
4207 			goto release;
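		/* Reading these PHY statistics registers clears them */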
4208 		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4209 		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
4210 		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4211 		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
4212 		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4213 		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
4214 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4215 		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
4216 		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4217 		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
4218 		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4219 		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
4220 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4221 		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
4222 release:
4223 		hw->phy.ops.release(hw);
4224 	}
4225 }
4226 
4227