1 /*	$OpenBSD: ixgbe.c,v 1.23 2016/12/02 15:22:57 mikeb Exp $	*/
2 
3 /******************************************************************************
4 
5   Copyright (c) 2001-2015, Intel Corporation
6   All rights reserved.
7 
8   Redistribution and use in source and binary forms, with or without
9   modification, are permitted provided that the following conditions are met:
10 
11    1. Redistributions of source code must retain the above copyright notice,
12       this list of conditions and the following disclaimer.
13 
14    2. Redistributions in binary form must reproduce the above copyright
15       notice, this list of conditions and the following disclaimer in the
16       documentation and/or other materials provided with the distribution.
17 
18    3. Neither the name of the Intel Corporation nor the names of its
19       contributors may be used to endorse or promote products derived from
20       this software without specific prior written permission.
21 
22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32   POSSIBILITY OF SUCH DAMAGE.
33 
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_common.c 299200 2016-05-06 22:54:56Z pfg $*/
36 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_mbx.c 299200 2016-05-06 22:54:56Z pfg $*/
37 
38 #include <dev/pci/ixgbe.h>
39 
40 #ifdef __sparc64__
41 #include <dev/ofw/openfirm.h>
42 #endif
43 
44 void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw,
45 				       uint16_t link_status);
46 
47 int32_t ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
48 int32_t ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
49 void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
50 int32_t ixgbe_ready_eeprom(struct ixgbe_hw *hw);
51 void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
52 void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, uint16_t data,
53 				 uint16_t count);
54 uint16_t ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, uint16_t count);
55 void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec);
56 void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec);
57 void ixgbe_release_eeprom(struct ixgbe_hw *hw);
58 
59 int32_t ixgbe_mta_vector(struct ixgbe_hw *hw, uint8_t *mc_addr);
60 int32_t ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
61 int32_t ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
62 int32_t ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
63 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
64 int32_t ixgbe_negotiate_fc(struct ixgbe_hw *hw, uint32_t adv_reg,
65 			   uint32_t lp_reg, uint32_t adv_sym, uint32_t adv_asm,
66 			   uint32_t lp_sym, uint32_t lp_asm);
67 
68 int32_t prot_autoc_read_generic(struct ixgbe_hw *, bool *, uint32_t *);
69 int32_t prot_autoc_write_generic(struct ixgbe_hw *, uint32_t, bool);
70 
71 int32_t ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, uint32_t vlan);
72 
73 /* MBX */
74 int32_t ixgbe_poll_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id);
75 int32_t ixgbe_poll_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id);
76 uint32_t ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw);
77 int32_t ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, uint32_t mask,
78 			       int32_t index);
79 int32_t ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, uint16_t vf_number);
80 int32_t ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, uint16_t vf_number);
81 int32_t ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, uint16_t vf_number);
82 int32_t ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, uint16_t vf_number);
83 int32_t ixgbe_write_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
84 			   uint16_t vf_number);
85 int32_t ixgbe_read_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
86 			  uint16_t vf_number);
87 
88 
89 /**
90  *  ixgbe_init_ops_generic - Inits function ptrs
91  *  @hw: pointer to the hardware structure
92  *
93  *  Initialize the function pointers.
94  **/
95 int32_t ixgbe_init_ops_generic(struct ixgbe_hw *hw)
96 {
97 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
98 	struct ixgbe_mac_info *mac = &hw->mac;
99 	uint32_t eec = IXGBE_READ_REG(hw, IXGBE_EEC);
100 
101 	DEBUGFUNC("ixgbe_init_ops_generic");
102 
103 	/* EEPROM */
104 	eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
105 	/* If EEPROM is valid (bit 8 = 1), use EERD; otherwise use bit-bang */
106 	if (eec & IXGBE_EEC_PRES)
107 		eeprom->ops.read = ixgbe_read_eerd_generic;
108 	else
109 		eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
110 	eeprom->ops.write = ixgbe_write_eeprom_generic;
111 	eeprom->ops.validate_checksum =
112 				      ixgbe_validate_eeprom_checksum_generic;
113 	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
114 	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;
115 
116 	/* MAC */
117 	mac->ops.init_hw = ixgbe_init_hw_generic;
118 	mac->ops.reset_hw = NULL;
119 	mac->ops.start_hw = ixgbe_start_hw_generic;
120 	mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
121 	mac->ops.get_media_type = NULL;
122 	mac->ops.get_supported_physical_layer = NULL;
123 	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
124 	mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
125 	mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
126 	mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
127 	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
128 	mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
129 	mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
130 	mac->ops.prot_autoc_read = prot_autoc_read_generic;
131 	mac->ops.prot_autoc_write = prot_autoc_write_generic;
132 
133 	/* LEDs */
134 	mac->ops.led_on = ixgbe_led_on_generic;
135 	mac->ops.led_off = ixgbe_led_off_generic;
136 	mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
137 	mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
138 
139 	/* RAR, Multicast, VLAN */
140 	mac->ops.set_rar = ixgbe_set_rar_generic;
141 	mac->ops.clear_rar = ixgbe_clear_rar_generic;
142 	mac->ops.insert_mac_addr = NULL;
143 	mac->ops.set_vmdq = NULL;
144 	mac->ops.clear_vmdq = NULL;
145 	mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
146 	mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
147 	mac->ops.enable_mc = ixgbe_enable_mc_generic;
148 	mac->ops.disable_mc = ixgbe_disable_mc_generic;
149 	mac->ops.clear_vfta = NULL;
150 	mac->ops.set_vfta = NULL;
151 	mac->ops.init_uta_tables = NULL;
152 	mac->ops.enable_rx = ixgbe_enable_rx_generic;
153 	mac->ops.disable_rx = ixgbe_disable_rx_generic;
154 
155 	/* Flow Control */
156 	mac->ops.fc_enable = ixgbe_fc_enable_generic;
157 	mac->ops.setup_fc = ixgbe_setup_fc_generic;
158 
159 	/* Link */
160 	mac->ops.get_link_capabilities = NULL;
161 	mac->ops.setup_link = NULL;
162 	mac->ops.check_link = NULL;
163 
164 	return IXGBE_SUCCESS;
165 }
166 
167 /**
168  * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
169  * of flow control
170  * @hw: pointer to hardware structure
171  *
172  * This function returns TRUE if the device supports flow control
173  * autonegotiation, and FALSE if it does not.
174  *
175  **/
176 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
177 {
178 	bool supported = FALSE;
179 	ixgbe_link_speed speed;
180 	bool link_up;
181 
182 	DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
183 
184 	switch (hw->phy.media_type) {
185 	case ixgbe_media_type_fiber_fixed:
186 	case ixgbe_media_type_fiber_qsfp:
187 	case ixgbe_media_type_fiber:
188 		hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
189 		/* if link is down, assume supported */
190 		if (link_up)
191 			supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
192 				TRUE : FALSE;
193 		else
194 			supported = TRUE;
195 		break;
196 	case ixgbe_media_type_backplane:
197 		supported = TRUE;
198 		break;
199 	case ixgbe_media_type_copper:
200 		/* only some copper devices support flow control autoneg */
201 		switch (hw->device_id) {
202 		case IXGBE_DEV_ID_82599_T3_LOM:
203 		case IXGBE_DEV_ID_X540T:
204 		case IXGBE_DEV_ID_X540T1:
205 		case IXGBE_DEV_ID_X540_BYPASS:
206 		case IXGBE_DEV_ID_X550T:
207 		case IXGBE_DEV_ID_X550T1:
208 		case IXGBE_DEV_ID_X550EM_X_10G_T:
209 			supported = TRUE;
210 			break;
211 		default:
212 			supported = FALSE;
213 		}
214 	default:
215 		break;
216 	}
217 
218 	if (!supported) {
219 		ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
220 		      "Device %x does not support flow control autoneg",
221 		      hw->device_id);
222 	}
223 
224 	return supported;
225 }
226 
227 /**
228  *  ixgbe_setup_fc_generic - Set up flow control
229  *  @hw: pointer to hardware structure
230  *
231  *  Called at init time to set up flow control.
232  **/
233 int32_t ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
234 {
235 	int32_t ret_val = IXGBE_SUCCESS;
236 	uint32_t reg = 0, reg_bp = 0;
237 	uint16_t reg_cu = 0;
238 	bool locked = FALSE;
239 
240 	DEBUGFUNC("ixgbe_setup_fc");
241 
242 	/* Validate the requested mode */
243 	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
244 		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
245 			   "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
246 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
247 		goto out;
248 	}
249 
250 	/*
251 	 * 10gig parts do not have a word in the EEPROM to determine the
252 	 * default flow control setting, so we explicitly set it to full.
253 	 */
254 	if (hw->fc.requested_mode == ixgbe_fc_default)
255 		hw->fc.requested_mode = ixgbe_fc_full;
256 
257 	/*
258 	 * Set up the 1G and 10G flow control advertisement registers so the
259 	 * HW will be able to do fc autoneg once the cable is plugged in.  If
260 	 * we link at 10G, the 1G advertisement is harmless and vice versa.
261 	 */
262 	switch (hw->phy.media_type) {
263 	case ixgbe_media_type_backplane:
264 		/* some MACs need RMW protection on AUTOC */
265 		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
266 		if (ret_val != IXGBE_SUCCESS)
267 			goto out;
268 
269 		/* only backplane uses AUTOC, so fall through */
270 	case ixgbe_media_type_fiber_fixed:
271 	case ixgbe_media_type_fiber_qsfp:
272 	case ixgbe_media_type_fiber:
273 		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
274 
275 		break;
276 	case ixgbe_media_type_copper:
277 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
278 				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
279 		break;
280 	default:
281 		break;
282 	}
283 
284 	/*
285 	 * The possible values of fc.requested_mode are:
286 	 * 0: Flow control is completely disabled
287 	 * 1: Rx flow control is enabled (we can receive pause frames,
288 	 *    but not send pause frames).
289 	 * 2: Tx flow control is enabled (we can send pause frames but
290 	 *    we do not support receiving pause frames).
291 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
292 	 * other: Invalid.
293 	 */
294 	switch (hw->fc.requested_mode) {
295 	case ixgbe_fc_none:
296 		/* Flow control completely disabled by software override. */
297 		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
298 		if (hw->phy.media_type == ixgbe_media_type_backplane)
299 			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
300 				    IXGBE_AUTOC_ASM_PAUSE);
301 		else if (hw->phy.media_type == ixgbe_media_type_copper)
302 			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
303 		break;
304 	case ixgbe_fc_tx_pause:
305 		/*
306 		 * Tx Flow control is enabled, and Rx Flow control is
307 		 * disabled by software override.
308 		 */
309 		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
310 		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
311 		if (hw->phy.media_type == ixgbe_media_type_backplane) {
312 			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
313 			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
314 		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
315 			reg_cu |= IXGBE_TAF_ASM_PAUSE;
316 			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
317 		}
318 		break;
319 	case ixgbe_fc_rx_pause:
320 		/*
321 		 * Rx Flow control is enabled and Tx Flow control is
322 		 * disabled by software override. Since there really
323 		 * isn't a way to advertise that we are capable of RX
324 		 * Pause ONLY, we will advertise that we support both
325 		 * symmetric and asymmetric Rx PAUSE, as such we fall
326 		 * through to the fc_full statement.  Later, we will
327 		 * disable the adapter's ability to send PAUSE frames.
328 		 */
329 	case ixgbe_fc_full:
330 		/* Flow control (both Rx and Tx) is enabled by SW override. */
331 		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
332 		if (hw->phy.media_type == ixgbe_media_type_backplane)
333 			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
334 				  IXGBE_AUTOC_ASM_PAUSE;
335 		else if (hw->phy.media_type == ixgbe_media_type_copper)
336 			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
337 		break;
338 	default:
339 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
340 			     "Flow control param set incorrectly\n");
341 		ret_val = IXGBE_ERR_CONFIG;
342 		goto out;
343 		break;
344 	}
345 
346 	if (hw->mac.type < ixgbe_mac_X540) {
347 		/*
348 		 * Enable auto-negotiation between the MAC & PHY;
349 		 * the MAC will advertise clause 37 flow control.
350 		 */
351 		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
352 		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
353 
354 		/* Disable AN timeout */
355 		if (hw->fc.strict_ieee)
356 			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
357 
358 		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
359 		DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
360 	}
361 
362 	/*
363 	 * AUTOC restart handles negotiation of 1G and 10G on backplane
364 	 * and copper. There is no need to set the PCS1GCTL register.
365 	 *
366 	 */
367 	if (hw->phy.media_type == ixgbe_media_type_backplane) {
368 		reg_bp |= IXGBE_AUTOC_AN_RESTART;
369 		ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
370 		if (ret_val)
371 			goto out;
372 	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
373 		    (ixgbe_device_supports_autoneg_fc(hw))) {
374 		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
375 				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
376 	}
377 
378 	DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
379 out:
380 	return ret_val;
381 }
382 
383 /**
384  *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
385  *  @hw: pointer to hardware structure
386  *
387  *  Starts the hardware by setting the media type, clearing the VLAN filter
388  *  table and all on-chip counters, setting the PCIe no-snoop disable bit,
389  *  setting up flow control, and leaving the transmit and receive units
390  *  disabled and uninitialized.
391  **/
392 int32_t ixgbe_start_hw_generic(struct ixgbe_hw *hw)
393 {
394 	int32_t ret_val = IXGBE_SUCCESS;
395 	uint32_t ctrl_ext;
396 
397 	DEBUGFUNC("ixgbe_start_hw_generic");
398 
399 	/* Set the media type */
400 	hw->phy.media_type = hw->mac.ops.get_media_type(hw);
401 
402 	/* PHY ops initialization must be done in reset_hw() */
403 
404 	/* Clear the VLAN filter table */
405 	hw->mac.ops.clear_vfta(hw);
406 
407 	/* Clear statistics registers */
408 	hw->mac.ops.clear_hw_cntrs(hw);
409 
410 	/* Set No Snoop Disable */
411 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
412 	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
413 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
414 	IXGBE_WRITE_FLUSH(hw);
415 
416 	/* Setup flow control */
417 	if (hw->mac.ops.setup_fc) {
418 		ret_val = hw->mac.ops.setup_fc(hw);
419 		if (ret_val != IXGBE_SUCCESS)
420 			goto out;
421 	}
422 
423 	/* Clear adapter stopped flag */
424 	hw->adapter_stopped = FALSE;
425 
426 out:
427 	return ret_val;
428 }
429 
430 /**
431  *  ixgbe_start_hw_gen2 - Init sequence for common device family
432  *  @hw: pointer to hw structure
433  *
434  * Performs the init sequence common to the second generation
435  * of 10 GbE devices.
436  * Devices in the second generation:
437  *     82599
438  *     X540
439  **/
440 int32_t ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
441 {
442 	uint32_t i;
443 	uint32_t regval;
444 
445 	/* Clear the rate limiters */
446 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
447 		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
448 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
449 	}
450 	IXGBE_WRITE_FLUSH(hw);
451 
452 	/* Disable relaxed ordering */
453 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
454 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
455 		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
456 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
457 	}
458 
459 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
460 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
461 		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
462 			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
463 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
464 	}
465 
466 	return IXGBE_SUCCESS;
467 }
468 
469 /**
470  *  ixgbe_init_hw_generic - Generic hardware initialization
471  *  @hw: pointer to hardware structure
472  *
473  *  Initializes the hardware by resetting it and then running the start
474  *  sequence, which sets the media type, clears all on-chip counters and the
475  *  VLAN filter table, sets up flow control, and leaves the transmit and
476  *  receive units disabled and uninitialized.
478  **/
479 int32_t ixgbe_init_hw_generic(struct ixgbe_hw *hw)
480 {
481 	int32_t status;
482 
483 	DEBUGFUNC("ixgbe_init_hw_generic");
484 
485 	/* Reset the hardware */
486 	status = hw->mac.ops.reset_hw(hw);
487 
488 	if (status == IXGBE_SUCCESS) {
489 		/* Start the HW */
490 		status = hw->mac.ops.start_hw(hw);
491 	}
492 
493 	return status;
494 }
495 
496 /**
497  *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
498  *  @hw: pointer to hardware structure
499  *
500  *  Clears all hardware statistics counters by reading them from the hardware.
501  *  Statistics counters are clear on read.
502  **/
503 int32_t ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
504 {
505 	uint16_t i = 0;
506 
507 	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
508 
509 	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
510 	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
511 	IXGBE_READ_REG(hw, IXGBE_ERRBC);
512 	IXGBE_READ_REG(hw, IXGBE_MSPDC);
513 	for (i = 0; i < 8; i++)
514 		IXGBE_READ_REG(hw, IXGBE_MPC(i));
515 
516 	IXGBE_READ_REG(hw, IXGBE_MLFC);
517 	IXGBE_READ_REG(hw, IXGBE_MRFC);
518 	IXGBE_READ_REG(hw, IXGBE_RLEC);
519 	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
520 	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
521 	if (hw->mac.type >= ixgbe_mac_82599EB) {
522 		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
523 		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
524 	} else {
525 		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
526 		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
527 	}
528 
529 	for (i = 0; i < 8; i++) {
530 		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
531 		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
532 		if (hw->mac.type >= ixgbe_mac_82599EB) {
533 			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
534 			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
535 		} else {
536 			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
537 			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
538 		}
539 	}
540 	if (hw->mac.type >= ixgbe_mac_82599EB)
541 		for (i = 0; i < 8; i++)
542 			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
543 	IXGBE_READ_REG(hw, IXGBE_PRC64);
544 	IXGBE_READ_REG(hw, IXGBE_PRC127);
545 	IXGBE_READ_REG(hw, IXGBE_PRC255);
546 	IXGBE_READ_REG(hw, IXGBE_PRC511);
547 	IXGBE_READ_REG(hw, IXGBE_PRC1023);
548 	IXGBE_READ_REG(hw, IXGBE_PRC1522);
549 	IXGBE_READ_REG(hw, IXGBE_GPRC);
550 	IXGBE_READ_REG(hw, IXGBE_BPRC);
551 	IXGBE_READ_REG(hw, IXGBE_MPRC);
552 	IXGBE_READ_REG(hw, IXGBE_GPTC);
553 	IXGBE_READ_REG(hw, IXGBE_GORCL);
554 	IXGBE_READ_REG(hw, IXGBE_GORCH);
555 	IXGBE_READ_REG(hw, IXGBE_GOTCL);
556 	IXGBE_READ_REG(hw, IXGBE_GOTCH);
557 	if (hw->mac.type == ixgbe_mac_82598EB)
558 		for (i = 0; i < 8; i++)
559 			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
560 	IXGBE_READ_REG(hw, IXGBE_RUC);
561 	IXGBE_READ_REG(hw, IXGBE_RFC);
562 	IXGBE_READ_REG(hw, IXGBE_ROC);
563 	IXGBE_READ_REG(hw, IXGBE_RJC);
564 	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
565 	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
566 	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
567 	IXGBE_READ_REG(hw, IXGBE_TORL);
568 	IXGBE_READ_REG(hw, IXGBE_TORH);
569 	IXGBE_READ_REG(hw, IXGBE_TPR);
570 	IXGBE_READ_REG(hw, IXGBE_TPT);
571 	IXGBE_READ_REG(hw, IXGBE_PTC64);
572 	IXGBE_READ_REG(hw, IXGBE_PTC127);
573 	IXGBE_READ_REG(hw, IXGBE_PTC255);
574 	IXGBE_READ_REG(hw, IXGBE_PTC511);
575 	IXGBE_READ_REG(hw, IXGBE_PTC1023);
576 	IXGBE_READ_REG(hw, IXGBE_PTC1522);
577 	IXGBE_READ_REG(hw, IXGBE_MPTC);
578 	IXGBE_READ_REG(hw, IXGBE_BPTC);
579 	for (i = 0; i < 16; i++) {
580 		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
581 		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
582 		if (hw->mac.type >= ixgbe_mac_82599EB) {
583 			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
584 			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
585 			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
586 			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
587 			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
588 		} else {
589 			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
590 			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
591 		}
592 	}
593 
594 	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
595 		if (hw->phy.id == 0)
596 			ixgbe_identify_phy(hw);
597 		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
598 				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
599 		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
600 				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
601 		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
602 				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
603 		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
604 				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
605 	}
606 
607 	return IXGBE_SUCCESS;
608 }
609 
610 /**
611  *  ixgbe_get_mac_addr_generic - Generic get MAC address
612  *  @hw: pointer to hardware structure
613  *  @mac_addr: Adapter MAC address
614  *
615  *  Reads the adapter's MAC address from the first Receive Address Register
616  *  (RAR0). A reset of the adapter must be performed prior to calling this
617  *  function so that the MAC address has been loaded from the EEPROM into RAR0.
618  **/
619 int32_t ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, uint8_t *mac_addr)
620 {
621 	uint32_t rar_high;
622 	uint32_t rar_low;
623 	uint16_t i;
624 
625 	DEBUGFUNC("ixgbe_get_mac_addr_generic");
626 
627 #ifdef __sparc64__
628 	struct ixgbe_osdep *os = hw->back;
629 
630 	if (OF_getprop(PCITAG_NODE(os->os_pa.pa_tag), "local-mac-address",
631 	    mac_addr, ETHER_ADDR_LEN) == ETHER_ADDR_LEN)
632 		return IXGBE_SUCCESS;
633 #endif
634 
635 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
636 	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
637 
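	/* the first four bytes of the MAC address are in RAL(0), least
	 * significant byte first; the last two are in the low 16 bits of
	 * RAH(0) */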
638 	for (i = 0; i < 4; i++)
639 		mac_addr[i] = (uint8_t)(rar_low >> (i*8));
640 
641 	for (i = 0; i < 2; i++)
642 		mac_addr[i+4] = (uint8_t)(rar_high >> (i*8));
643 
644 	return IXGBE_SUCCESS;
645 }
646 
647 /**
648  *  ixgbe_set_pci_config_data_generic - Generic store PCI bus info
649  *  @hw: pointer to hardware structure
650  *  @link_status: the link status returned by the PCI config space
651  *
652  *  Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
653  **/
654 void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw,
655 				       uint16_t link_status)
656 {
657 	struct ixgbe_mac_info *mac = &hw->mac;
658 
659 	hw->bus.type = ixgbe_bus_type_pci_express;
660 
661 	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
662 	case IXGBE_PCI_LINK_WIDTH_1:
663 		hw->bus.width = ixgbe_bus_width_pcie_x1;
664 		break;
665 	case IXGBE_PCI_LINK_WIDTH_2:
666 		hw->bus.width = ixgbe_bus_width_pcie_x2;
667 		break;
668 	case IXGBE_PCI_LINK_WIDTH_4:
669 		hw->bus.width = ixgbe_bus_width_pcie_x4;
670 		break;
671 	case IXGBE_PCI_LINK_WIDTH_8:
672 		hw->bus.width = ixgbe_bus_width_pcie_x8;
673 		break;
674 	default:
675 		hw->bus.width = ixgbe_bus_width_unknown;
676 		break;
677 	}
678 
679 	switch (link_status & IXGBE_PCI_LINK_SPEED) {
680 	case IXGBE_PCI_LINK_SPEED_2500:
681 		hw->bus.speed = ixgbe_bus_speed_2500;
682 		break;
683 	case IXGBE_PCI_LINK_SPEED_5000:
684 		hw->bus.speed = ixgbe_bus_speed_5000;
685 		break;
686 	case IXGBE_PCI_LINK_SPEED_8000:
687 		hw->bus.speed = ixgbe_bus_speed_8000;
688 		break;
689 	default:
690 		hw->bus.speed = ixgbe_bus_speed_unknown;
691 		break;
692 	}
693 
694 	mac->ops.set_lan_id(hw);
695 }
696 
697 /**
698  *  ixgbe_get_bus_info_generic - Generic set PCI bus info
699  *  @hw: pointer to hardware structure
700  *
701  *  Gets the PCI bus info (speed, width, type) then calls helper function to
702  *  store this data within the ixgbe_hw structure.
703  **/
704 int32_t ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
705 {
706 	uint16_t link_status;
707 
708 	DEBUGFUNC("ixgbe_get_bus_info_generic");
709 
710 	/* Get the negotiated link width and speed from PCI config space */
711 	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
712 
713 	ixgbe_set_pci_config_data_generic(hw, link_status);
714 
715 	return IXGBE_SUCCESS;
716 }
717 
718 /**
719  *  ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
720  *  @hw: pointer to the HW structure
721  *
722  *  Determines the LAN function id by reading memory-mapped registers
723  *  and swaps the port value if requested.
724  **/
725 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
726 {
727 	struct ixgbe_bus_info *bus = &hw->bus;
728 	uint32_t reg;
729 
730 	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
731 
732 	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
733 	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
734 	bus->lan_id = bus->func;
735 
736 	/* check for a port swap */
737 	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
738 	if (reg & IXGBE_FACTPS_LFS)
739 		bus->func ^= 0x1;
740 }
741 
742 /**
743  *  ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
744  *  @hw: pointer to hardware structure
745  *
746  *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
747  *  disables transmit and receive units. The adapter_stopped flag is used by
748  *  the shared code and drivers to determine if the adapter is in a stopped
749  *  state and should not touch the hardware.
750  **/
751 int32_t ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
752 {
753 	uint32_t reg_val;
754 	uint16_t i;
755 
756 	DEBUGFUNC("ixgbe_stop_adapter_generic");
757 
758 	/*
759 	 * Set the adapter_stopped flag so other driver functions stop touching
760 	 * the hardware
761 	 */
762 	hw->adapter_stopped = TRUE;
763 
764 	/* Disable the receive unit */
765 	ixgbe_disable_rx(hw);
766 
767 	/* Clear interrupt mask to stop interrupts from being generated */
768 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
769 
770 	/* Clear any pending interrupts, flush previous writes */
771 	IXGBE_READ_REG(hw, IXGBE_EICR);
772 
773 	/* Disable the transmit unit.  Each queue must be disabled. */
774 	for (i = 0; i < hw->mac.max_tx_queues; i++)
775 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
776 
777 	/* Disable the receive unit by stopping each queue */
778 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
779 		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
780 		reg_val &= ~IXGBE_RXDCTL_ENABLE;
781 		reg_val |= IXGBE_RXDCTL_SWFLSH;
782 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
783 	}
784 
785 	/* flush all the queue disables */
786 	IXGBE_WRITE_FLUSH(hw);
787 	msec_delay(2);
788 
789 	/*
790 	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
791 	 * access and verify no pending requests
792 	 */
793 	return ixgbe_disable_pcie_master(hw);
794 }
795 
796 /**
797  *  ixgbe_led_on_generic - Turns on the software controllable LEDs.
798  *  @hw: pointer to hardware structure
799  *  @index: led number to turn on
800  **/
801 int32_t ixgbe_led_on_generic(struct ixgbe_hw *hw, uint32_t index)
802 {
803 	uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
804 
805 	DEBUGFUNC("ixgbe_led_on_generic");
806 
807 	/* To turn on the LED, set mode to ON. */
808 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
809 	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
810 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
811 	IXGBE_WRITE_FLUSH(hw);
812 
813 	return IXGBE_SUCCESS;
814 }
815 
816 /**
817  *  ixgbe_led_off_generic - Turns off the software controllable LEDs.
818  *  @hw: pointer to hardware structure
819  *  @index: led number to turn off
820  **/
821 int32_t ixgbe_led_off_generic(struct ixgbe_hw *hw, uint32_t index)
822 {
823 	uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
824 
825 	DEBUGFUNC("ixgbe_led_off_generic");
826 
827 	/* To turn off the LED, set mode to OFF. */
828 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
829 	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
830 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
831 	IXGBE_WRITE_FLUSH(hw);
832 
833 	return IXGBE_SUCCESS;
834 }
835 
836 /**
837  *  ixgbe_init_eeprom_params_generic - Initialize EEPROM params
838  *  @hw: pointer to hardware structure
839  *
840  *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
841  *  ixgbe_hw struct in order to set up EEPROM access.
842  **/
843 int32_t ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
844 {
845 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
846 	uint32_t eec;
847 	uint16_t eeprom_size;
848 
849 	DEBUGFUNC("ixgbe_init_eeprom_params_generic");
850 
851 	if (eeprom->type == ixgbe_eeprom_uninitialized) {
852 		eeprom->type = ixgbe_eeprom_none;
853 		/* Set default semaphore delay to 10ms which is a well
854 		 * tested value */
855 		eeprom->semaphore_delay = 10;
856 		/* Clear EEPROM page size, it will be initialized as needed */
857 		eeprom->word_page_size = 0;
858 
859 		/*
860 		 * Check for EEPROM present first.
861 		 * If not present leave as none
862 		 */
863 		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
864 		if (eec & IXGBE_EEC_PRES) {
865 			eeprom->type = ixgbe_eeprom_spi;
866 
867 			/*
868 			 * SPI EEPROM is assumed here.  This code would need to
869 			 * change if a future EEPROM is not SPI.
870 			 */
871 			eeprom_size = (uint16_t)((eec & IXGBE_EEC_SIZE) >>
872 					    IXGBE_EEC_SIZE_SHIFT);
873 			eeprom->word_size = 1 << (eeprom_size +
874 					     IXGBE_EEPROM_WORD_SIZE_SHIFT);
875 		}
876 
877 		if (eec & IXGBE_EEC_ADDR_SIZE)
878 			eeprom->address_bits = 16;
879 		else
880 			eeprom->address_bits = 8;
881 		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
882 			  "%d\n", eeprom->type, eeprom->word_size,
883 			  eeprom->address_bits);
884 	}
885 
886 	return IXGBE_SUCCESS;
887 }
888 
889 /**
890  *  ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
891  *  @hw: pointer to hardware structure
892  *  @offset: offset within the EEPROM to be written to
893  *  @words: number of word(s)
894  *  @data: 16 bit word(s) to be written to the EEPROM
895  *
896  *  If ixgbe_eeprom_update_checksum is not called after this function, the
897  *  EEPROM will most likely contain an invalid checksum.
898  **/
899 static int32_t ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, uint16_t offset,
900 					      uint16_t words, uint16_t *data)
901 {
902 	int32_t status;
903 	uint16_t word;
904 	uint16_t page_size;
905 	uint16_t i;
906 	uint8_t write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
907 
908 	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
909 
910 	/* Prepare the EEPROM for writing  */
911 	status = ixgbe_acquire_eeprom(hw);
912 
913 	if (status == IXGBE_SUCCESS) {
914 		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
915 			ixgbe_release_eeprom(hw);
916 			status = IXGBE_ERR_EEPROM;
917 		}
918 	}
919 
920 	if (status == IXGBE_SUCCESS) {
921 		for (i = 0; i < words; i++) {
922 			ixgbe_standby_eeprom(hw);
923 
924 			/* Send the WRITE ENABLE command (8-bit opcode) */
925 			ixgbe_shift_out_eeprom_bits(hw,
926 						   IXGBE_EEPROM_WREN_OPCODE_SPI,
927 						   IXGBE_EEPROM_OPCODE_BITS);
928 
929 			ixgbe_standby_eeprom(hw);
930 
931 			/*
932 			 * Some SPI eeproms use the 8th address bit embedded
933 			 * in the opcode
934 			 */
935 			if ((hw->eeprom.address_bits == 8) &&
936 			    ((offset + i) >= 128))
937 				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
938 
939 			/* Send the Write command (8-bit opcode + addr) */
940 			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
941 						    IXGBE_EEPROM_OPCODE_BITS);
942 			ixgbe_shift_out_eeprom_bits(hw, (uint16_t)((offset + i) * 2),
943 						    hw->eeprom.address_bits);
944 
945 			page_size = hw->eeprom.word_page_size;
946 
947 			/* Send the data in bursts via SPI */
948 			do {
949 				word = data[i];
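				/* bits are clocked out MSB-first, so swap to send
				 * the low-order byte of the word first */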
950 				word = (word >> 8) | (word << 8);
951 				ixgbe_shift_out_eeprom_bits(hw, word, 16);
952 
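				/* unknown page size: fall back to one word per
				 * WRITE command */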
953 				if (page_size == 0)
954 					break;
955 
956 				/* do not wrap around page */
957 				if (((offset + i) & (page_size - 1)) ==
958 				    (page_size - 1))
959 					break;
960 			} while (++i < words);
961 
962 			ixgbe_standby_eeprom(hw);
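			/* give the EEPROM time to complete its internal write cycle */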
963 			msec_delay(10);
964 		}
965 		/* Done with writing - release the EEPROM */
966 		ixgbe_release_eeprom(hw);
967 	}
968 
969 	return status;
970 }
971 
972 /**
973  *  ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
974  *  @hw: pointer to hardware structure
975  *  @offset: offset within the EEPROM to be written to
976  *  @data: 16 bit word to be written to the EEPROM
977  *
978  *  If ixgbe_eeprom_update_checksum is not called after this function, the
979  *  EEPROM will most likely contain an invalid checksum.
980  **/
981 int32_t ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, uint16_t offset, uint16_t data)
982 {
983 	int32_t status;
984 
985 	DEBUGFUNC("ixgbe_write_eeprom_generic");
986 
987 	hw->eeprom.ops.init_params(hw);
988 
989 	if (offset >= hw->eeprom.word_size) {
990 		status = IXGBE_ERR_EEPROM;
991 		goto out;
992 	}
993 
994 	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
995 
996 out:
997 	return status;
998 }
999 
1000 /**
1001  *  ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1002  *  @hw: pointer to hardware structure
1003  *  @offset: offset within the EEPROM to be read
1004  *  @words: number of word(s)
1005  *  @data: read 16 bit word(s) from EEPROM
1006  *
1007  *  Reads 16 bit word(s) from EEPROM through bit-bang method
1008  **/
1009 static int32_t ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, uint16_t offset,
1010 					     uint16_t words, uint16_t *data)
1011 {
1012 	int32_t status;
1013 	uint16_t word_in;
1014 	uint8_t read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1015 	uint16_t i;
1016 
1017 	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
1018 
1019 	/* Prepare the EEPROM for reading  */
1020 	status = ixgbe_acquire_eeprom(hw);
1021 
1022 	if (status == IXGBE_SUCCESS) {
1023 		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1024 			ixgbe_release_eeprom(hw);
1025 			status = IXGBE_ERR_EEPROM;
1026 		}
1027 	}
1028 
1029 	if (status == IXGBE_SUCCESS) {
1030 		for (i = 0; i < words; i++) {
1031 			ixgbe_standby_eeprom(hw);
1032 			/*
1033 			 * Some SPI eeproms use the 8th address bit embedded
1034 			 * in the opcode
1035 			 */
1036 			if ((hw->eeprom.address_bits == 8) &&
1037 			    ((offset + i) >= 128))
1038 				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1039 
1040 			/* Send the READ command (opcode + addr) */
1041 			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1042 						    IXGBE_EEPROM_OPCODE_BITS);
1043 			ixgbe_shift_out_eeprom_bits(hw, (uint16_t)((offset + i) * 2),
1044 						    hw->eeprom.address_bits);
1045 
1046 			/* Read the data. */
1047 			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
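			/* the byte clocked in first lands in the high half of
			 * word_in; swap back into host word order */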
1048 			data[i] = (word_in >> 8) | (word_in << 8);
1049 		}
1050 
1051 		/* End this read operation */
1052 		ixgbe_release_eeprom(hw);
1053 	}
1054 
1055 	return status;
1056 }
1057 
1058 /**
1059  *  ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1060  *  @hw: pointer to hardware structure
1061  *  @offset: offset within the EEPROM to be read
1062  *  @data: read 16 bit value from EEPROM
1063  *
1064  *  Reads 16 bit value from EEPROM through bit-bang method
1065  **/
1066 int32_t ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, uint16_t offset,
1067 				       uint16_t *data)
1068 {
1069 	int32_t status;
1070 
1071 	DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1072 
1073 	hw->eeprom.ops.init_params(hw);
1074 
1075 	if (offset >= hw->eeprom.word_size) {
1076 		status = IXGBE_ERR_EEPROM;
1077 		goto out;
1078 	}
1079 
1080 	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1081 
1082 out:
1083 	return status;
1084 }
1085 
1086 /**
1087  *  ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1088  *  @hw: pointer to hardware structure
1089  *  @offset: offset of word in the EEPROM to read
1090  *  @words: number of word(s)
1091  *  @data: 16 bit word(s) from the EEPROM
1092  *
1093  *  Reads 16 bit word(s) from the EEPROM using the EERD register.
1094  **/
1095 int32_t ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, uint16_t offset,
1096 				   uint16_t words, uint16_t *data)
1097 {
1098 	uint32_t eerd;
1099 	int32_t status = IXGBE_SUCCESS;
1100 	uint32_t i;
1101 
1102 	DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1103 
1104 	hw->eeprom.ops.init_params(hw);
1105 
1106 	if (words == 0) {
1107 		status = IXGBE_ERR_INVALID_ARGUMENT;
1108 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1109 		goto out;
1110 	}
1111 
1112 	if (offset >= hw->eeprom.word_size) {
1113 		status = IXGBE_ERR_EEPROM;
1114 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1115 		goto out;
1116 	}
1117 
1118 	for (i = 0; i < words; i++) {
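		/* encode the word address, set the START bit, then poll for
		 * DONE before reading the result from the data field */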
1119 		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1120 		       IXGBE_EEPROM_RW_REG_START;
1121 
1122 		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1123 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1124 
1125 		if (status == IXGBE_SUCCESS) {
1126 			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1127 				   IXGBE_EEPROM_RW_REG_DATA);
1128 		} else {
1129 			DEBUGOUT("Eeprom read timed out\n");
1130 			goto out;
1131 		}
1132 	}
1133 out:
1134 	return status;
1135 }
1136 
1137 /**
1138  *  ixgbe_read_eerd_generic - Read EEPROM word using EERD
1139  *  @hw: pointer to hardware structure
1140  *  @offset: offset of word in the EEPROM to read
1141  *  @data: word read from the EEPROM
1142  *
1143  *  Reads a 16 bit word from the EEPROM using the EERD register.
1144  **/
1145 int32_t ixgbe_read_eerd_generic(struct ixgbe_hw *hw, uint16_t offset, uint16_t *data)
1146 {
1147 	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1148 }
1149 
1150 /**
1151  *  ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1152  *  @hw: pointer to hardware structure
1153  *  @offset: offset of word in the EEPROM to write
1154  *  @words: number of word(s)
1155  *  @data: word(s) to write to the EEPROM
1156  *
1157  *  Writes 16 bit word(s) to the EEPROM using the EEWR register.
1158  **/
1159 int32_t ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, uint16_t offset,
1160 				    uint16_t words, uint16_t *data)
1161 {
1162 	uint32_t eewr;
1163 	int32_t status = IXGBE_SUCCESS;
1164 	uint16_t i;
1165 
1166 	DEBUGFUNC("ixgbe_write_eewr_generic");
1167 
1168 	hw->eeprom.ops.init_params(hw);
1169 
1170 	if (words == 0) {
1171 		status = IXGBE_ERR_INVALID_ARGUMENT;
1172 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1173 		goto out;
1174 	}
1175 
1176 	if (offset >= hw->eeprom.word_size) {
1177 		status = IXGBE_ERR_EEPROM;
1178 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1179 		goto out;
1180 	}
1181 
1182 	for (i = 0; i < words; i++) {
1183 		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1184 			(data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1185 			IXGBE_EEPROM_RW_REG_START;
1186 
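		/* make sure any previous EEWR operation has finished before
		 * starting this write */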
1187 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1188 		if (status != IXGBE_SUCCESS) {
1189 			DEBUGOUT("Eeprom write EEWR timed out\n");
1190 			goto out;
1191 		}
1192 
1193 		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1194 
1195 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1196 		if (status != IXGBE_SUCCESS) {
1197 			DEBUGOUT("Eeprom write EEWR timed out\n");
1198 			goto out;
1199 		}
1200 	}
1201 
1202 out:
1203 	return status;
1204 }
1205 
1206 /**
1207  *  ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1208  *  @hw: pointer to hardware structure
1209  *  @offset: offset of word in the EEPROM to write
1210  *  @data: word to write to the EEPROM
1211  *
1212  *  Writes a 16 bit word to the EEPROM using the EEWR register.
1213  **/
1214 int32_t ixgbe_write_eewr_generic(struct ixgbe_hw *hw, uint16_t offset, uint16_t data)
1215 {
1216 	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1217 }
1218 
1219 /**
1220  *  ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1221  *  @hw: pointer to hardware structure
1222  *  @ee_reg: EEPROM flag for polling
1223  *
1224  *  Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1225  *  read or write is done respectively.
1226  **/
1227 int32_t ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, uint32_t ee_reg)
1228 {
1229 	uint32_t i;
1230 	uint32_t reg;
1231 	int32_t status = IXGBE_ERR_EEPROM;
1232 
1233 	DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1234 
1235 	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1236 		if (ee_reg == IXGBE_NVM_POLL_READ)
1237 			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1238 		else
1239 			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1240 
1241 		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1242 			status = IXGBE_SUCCESS;
1243 			break;
1244 		}
1245 		usec_delay(5);
1246 	}
1247 
1248 	if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1249 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
1250 			     "EEPROM read/write done polling timed out");
1251 
1252 	return status;
1253 }
1254 
1255 /**
1256  *  ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1257  *  @hw: pointer to hardware structure
1258  *
1259  *  Prepares EEPROM for access using bit-bang method. This function should
1260  *  be called before issuing a command to the EEPROM.
1261  **/
1262 int32_t ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1263 {
1264 	int32_t status = IXGBE_SUCCESS;
1265 	uint32_t eec;
1266 	uint32_t i;
1267 
1268 	DEBUGFUNC("ixgbe_acquire_eeprom");
1269 
1270 	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
1271 	    != IXGBE_SUCCESS)
1272 		status = IXGBE_ERR_SWFW_SYNC;
1273 
1274 	if (status == IXGBE_SUCCESS) {
1275 		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1276 
1277 		/* Request EEPROM Access */
1278 		eec |= IXGBE_EEC_REQ;
1279 		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1280 
1281 		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1282 			eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1283 			if (eec & IXGBE_EEC_GNT)
1284 				break;
1285 			usec_delay(5);
1286 		}
1287 
1288 		/* Release if grant not acquired */
1289 		if (!(eec & IXGBE_EEC_GNT)) {
1290 			eec &= ~IXGBE_EEC_REQ;
1291 			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1292 			DEBUGOUT("Could not acquire EEPROM grant\n");
1293 
1294 			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1295 			status = IXGBE_ERR_EEPROM;
1296 		}
1297 
1298 		/* Setup EEPROM for Read/Write */
1299 		if (status == IXGBE_SUCCESS) {
1300 			/* Clear CS and SK */
1301 			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1302 			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1303 			IXGBE_WRITE_FLUSH(hw);
1304 			usec_delay(1);
1305 		}
1306 	}
1307 	return status;
1308 }
1309 
1310 /**
1311  *  ixgbe_get_eeprom_semaphore - Get hardware semaphore
1312  *  @hw: pointer to hardware structure
1313  *
1314  *  Sets the hardware semaphores so EEPROM access can occur for the bit-bang method.
1315  **/
1316 int32_t ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1317 {
1318 	int32_t status = IXGBE_ERR_EEPROM;
1319 	uint32_t timeout = 2000;
1320 	uint32_t i;
1321 	uint32_t swsm;
1322 
1323 	DEBUGFUNC("ixgbe_get_eeprom_semaphore");
1324 
1325 
1326 	/* Get SMBI software semaphore between device drivers first */
1327 	for (i = 0; i < timeout; i++) {
1328 		/*
1329 		 * If the SMBI bit is 0 when we read it, then the bit will be
1330 		 * set and we have the semaphore
1331 		 */
1332 		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1333 		if (!(swsm & IXGBE_SWSM_SMBI)) {
1334 			status = IXGBE_SUCCESS;
1335 			break;
1336 		}
1337 		usec_delay(50);
1338 	}
1339 
1340 	if (i == timeout) {
1341 		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
1342 			 "not granted.\n");
1343 		/*
1344 		 * this release is particularly important because our attempts
1345 		 * above to get the semaphore may have succeeded, and if there
1346 		 * was a timeout, we should unconditionally clear the semaphore
1347 		 * bits to free the driver to make progress
1348 		 */
1349 		ixgbe_release_eeprom_semaphore(hw);
1350 
1351 		usec_delay(50);
1352 		/*
1353 		 * one last try
1354 		 * If the SMBI bit is 0 when we read it, then the bit will be
1355 		 * set and we have the semaphore
1356 		 */
1357 		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1358 		if (!(swsm & IXGBE_SWSM_SMBI))
1359 			status = IXGBE_SUCCESS;
1360 	}
1361 
1362 	/* Now get the semaphore between SW/FW through the SWESMBI bit */
1363 	if (status == IXGBE_SUCCESS) {
1364 		for (i = 0; i < timeout; i++) {
1365 			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1366 
1367 			/* Set the SW EEPROM semaphore bit to request access */
1368 			swsm |= IXGBE_SWSM_SWESMBI;
1369 			IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1370 
1371 			/*
1372 			 * If we set the bit successfully then we got the
1373 			 * semaphore.
1374 			 */
1375 			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1376 			if (swsm & IXGBE_SWSM_SWESMBI)
1377 				break;
1378 
1379 			usec_delay(50);
1380 		}
1381 
1382 		/*
1383 		 * Release semaphores and return error if SW EEPROM semaphore
1384 		 * was not granted because we don't have access to the EEPROM
1385 		 */
1386 		if (i >= timeout) {
1387 			ERROR_REPORT1(IXGBE_ERROR_POLLING,
1388 			    "SWESMBI Software EEPROM semaphore not granted.\n");
1389 			ixgbe_release_eeprom_semaphore(hw);
1390 			status = IXGBE_ERR_EEPROM;
1391 		}
1392 	} else {
1393 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
1394 			     "Software semaphore SMBI between device drivers "
1395 			     "not granted.\n");
1396 	}
1397 
1398 	return status;
1399 }
1400 
1401 /**
1402  *  ixgbe_release_eeprom_semaphore - Release hardware semaphore
1403  *  @hw: pointer to hardware structure
1404  *
1405  *  This function clears hardware semaphore bits.
1406  **/
1407 void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1408 {
1409 	uint32_t swsm;
1410 
1411 	DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1412 
1413 	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1414 
1415 	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1416 	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1417 	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1418 	IXGBE_WRITE_FLUSH(hw);
1419 }
1420 
1421 /**
1422  *  ixgbe_ready_eeprom - Polls for EEPROM ready
1423  *  @hw: pointer to hardware structure
1424  **/
1425 int32_t ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1426 {
1427 	int32_t status = IXGBE_SUCCESS;
1428 	uint16_t i;
1429 	uint8_t spi_stat_reg;
1430 
1431 	DEBUGFUNC("ixgbe_ready_eeprom");
1432 
1433 	/*
1434 	 * Read "Status Register" repeatedly until the LSB is cleared.  The
1435 	 * EEPROM will signal that the command has been completed by clearing
1436 	 * bit 0 of the internal status register.  If it's not cleared within
1437 	 * 5 milliseconds, then error out.
1438 	 */
1439 	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1440 		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1441 					    IXGBE_EEPROM_OPCODE_BITS);
1442 		spi_stat_reg = (uint8_t)ixgbe_shift_in_eeprom_bits(hw, 8);
1443 		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1444 			break;
1445 
1446 		usec_delay(5);
1447 		ixgbe_standby_eeprom(hw);
1448 	}
1449 
1450 	/*
1451 	 * On some parts, SPI write time could vary from 0-20 ms on 3.3V
1452 	 * devices (and only 0-5 ms on 5V devices)
1453 	 */
1454 	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1455 		DEBUGOUT("SPI EEPROM Status error\n");
1456 		status = IXGBE_ERR_EEPROM;
1457 	}
1458 
1459 	return status;
1460 }
1461 
1462 /**
1463  *  ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1464  *  @hw: pointer to hardware structure
1465  **/
1466 void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1467 {
1468 	uint32_t eec;
1469 
1470 	DEBUGFUNC("ixgbe_standby_eeprom");
1471 
1472 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1473 
1474 	/* Toggle CS to flush commands */
1475 	eec |= IXGBE_EEC_CS;
1476 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1477 	IXGBE_WRITE_FLUSH(hw);
1478 	usec_delay(1);
1479 	eec &= ~IXGBE_EEC_CS;
1480 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1481 	IXGBE_WRITE_FLUSH(hw);
1482 	usec_delay(1);
1483 }
1484 
1485 /**
1486  *  ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1487  *  @hw: pointer to hardware structure
1488  *  @data: data to send to the EEPROM
1489  *  @count: number of bits to shift out
1490  **/
1491 void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, uint16_t data,
1492 				 uint16_t count)
1493 {
1494 	uint32_t eec;
1495 	uint32_t mask;
1496 	uint32_t i;
1497 
1498 	DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
1499 
1500 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1501 
1502 	/*
1503 	 * Mask is used to shift "count" bits of "data" out to the EEPROM
1504 	 * one bit at a time.  Determine the starting bit based on count
1505 	 */
1506 	mask = 0x01 << (count - 1);
1507 
1508 	for (i = 0; i < count; i++) {
1509 		/*
1510 		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1511 		 * "1", and then raising and then lowering the clock (the SK
1512 		 * bit controls the clock input to the EEPROM).  A "0" is
1513 		 * shifted out to the EEPROM by setting "DI" to "0" and then
1514 		 * raising and then lowering the clock.
1515 		 */
1516 		if (data & mask)
1517 			eec |= IXGBE_EEC_DI;
1518 		else
1519 			eec &= ~IXGBE_EEC_DI;
1520 
1521 		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1522 		IXGBE_WRITE_FLUSH(hw);
1523 
1524 		usec_delay(1);
1525 
1526 		ixgbe_raise_eeprom_clk(hw, &eec);
1527 		ixgbe_lower_eeprom_clk(hw, &eec);
1528 
1529 		/*
1530 		 * Shift mask to select the next bit of data to shift out to the
1531 		 * EEPROM
1532 		 */
1533 		mask = mask >> 1;
1534 	}
1535 
1536 	/* We leave the "DI" bit set to "0" when we leave this routine. */
1537 	eec &= ~IXGBE_EEC_DI;
1538 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1539 	IXGBE_WRITE_FLUSH(hw);
1540 }
1541 
1542 /**
1543  *  ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
1544  *  @hw: pointer to hardware structure
 *  @count: number of bits to shift in from the EEPROM
1545  **/
1546 uint16_t ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, uint16_t count)
1547 {
1548 	uint32_t eec;
1549 	uint32_t i;
1550 	uint16_t data = 0;
1551 
1552 	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
1553 
1554 	/*
1555 	 * In order to read a register from the EEPROM, we need to shift
1556 	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
1557 	 * the clock input to the EEPROM (setting the SK bit), and then reading
1558 	 * the value of the "DO" bit.  During this "shifting in" process the
1559 	 * "DI" bit should always be clear.
1560 	 */
1561 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1562 
1563 	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
1564 
1565 	for (i = 0; i < count; i++) {
1566 		data = data << 1;
1567 		ixgbe_raise_eeprom_clk(hw, &eec);
1568 
1569 		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1570 
1571 		eec &= ~(IXGBE_EEC_DI);
1572 		if (eec & IXGBE_EEC_DO)
1573 			data |= 1;
1574 
1575 		ixgbe_lower_eeprom_clk(hw, &eec);
1576 	}
1577 
1578 	return data;
1579 }
1580 
1581 /**
1582  *  ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
1583  *  @hw: pointer to hardware structure
1584  *  @eec: EEC register's current value
1585  **/
1586 void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec)
1587 {
1588 	DEBUGFUNC("ixgbe_raise_eeprom_clk");
1589 
1590 	/*
1591 	 * Raise the clock input to the EEPROM
1592 	 * (setting the SK bit), then delay
1593 	 */
1594 	*eec = *eec | IXGBE_EEC_SK;
1595 	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1596 	IXGBE_WRITE_FLUSH(hw);
1597 	usec_delay(1);
1598 }
1599 
1600 /**
1601  *  ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
1602  *  @hw: pointer to hardware structure
1603  *  @eec: EEC register's current value
1604  **/
1605 void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec)
1606 {
1607 	DEBUGFUNC("ixgbe_lower_eeprom_clk");
1608 
1609 	/*
1610 	 * Lower the clock input to the EEPROM (clearing the SK bit), then
1611 	 * delay
1612 	 */
1613 	*eec = *eec & ~IXGBE_EEC_SK;
1614 	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1615 	IXGBE_WRITE_FLUSH(hw);
1616 	usec_delay(1);
1617 }
1618 
1619 /**
1620  *  ixgbe_release_eeprom - Release EEPROM, release semaphores
1621  *  @hw: pointer to hardware structure
1622  **/
1623 void ixgbe_release_eeprom(struct ixgbe_hw *hw)
1624 {
1625 	uint32_t eec;
1626 
1627 	DEBUGFUNC("ixgbe_release_eeprom");
1628 
1629 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1630 
1631 	eec |= IXGBE_EEC_CS;  /* Pull CS high */
1632 	eec &= ~IXGBE_EEC_SK; /* Lower SCK */
1633 
1634 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1635 	IXGBE_WRITE_FLUSH(hw);
1636 
1637 	usec_delay(1);
1638 
1639 	/* Stop requesting EEPROM access */
1640 	eec &= ~IXGBE_EEC_REQ;
1641 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1642 
1643 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1644 
1645 	/* Delay before attempt to obtain semaphore again to allow FW access */
1646 	msec_delay(hw->eeprom.semaphore_delay);
1647 }
1648 
1649 /**
1650  *  ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
1651  *  @hw: pointer to hardware structure
1652  *
1653  *  Returns a negative error code on error, or the 16-bit checksum
1654  **/
1655 int32_t ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
1656 {
1657 	uint16_t i;
1658 	uint16_t j;
1659 	uint16_t checksum = 0;
1660 	uint16_t length = 0;
1661 	uint16_t pointer = 0;
1662 	uint16_t word = 0;
1663 
1664 	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
1665 
1666 	/* Include 0x0-0x3F in the checksum */
1667 	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
1668 		if (hw->eeprom.ops.read(hw, i, &word)) {
1669 			DEBUGOUT("EEPROM read failed\n");
1670 			return IXGBE_ERR_EEPROM;
1671 		}
1672 		checksum += word;
1673 	}
1674 
1675 	/* Include all data from pointers except for the fw pointer */
1676 	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
1677 		if (hw->eeprom.ops.read(hw, i, &pointer)) {
1678 			DEBUGOUT("EEPROM read failed\n");
1679 			return IXGBE_ERR_EEPROM;
1680 		}
1681 
1682 		/* If the pointer seems invalid */
1683 		if (pointer == 0xFFFF || pointer == 0)
1684 			continue;
1685 
1686 		if (hw->eeprom.ops.read(hw, pointer, &length)) {
1687 			DEBUGOUT("EEPROM read failed\n");
1688 			return IXGBE_ERR_EEPROM;
1689 		}
1690 
1691 		if (length == 0xFFFF || length == 0)
1692 			continue;
1693 
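		/* the word at 'pointer' holds the section length; the data
		 * words follow immediately after it */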
1694 		for (j = pointer + 1; j <= pointer + length; j++) {
1695 			if (hw->eeprom.ops.read(hw, j, &word)) {
1696 				DEBUGOUT("EEPROM read failed\n");
1697 				return IXGBE_ERR_EEPROM;
1698 			}
1699 			checksum += word;
1700 		}
1701 	}
1702 
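	/* the checksum word is chosen so that the 16-bit sum of all checked
	 * words equals IXGBE_EEPROM_SUM */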
1703 	checksum = (uint16_t)IXGBE_EEPROM_SUM - checksum;
1704 
1705 	return (int32_t)checksum;
1706 }
1707 
1708 /**
1709  *  ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
1710  *  @hw: pointer to hardware structure
1711  *  @checksum_val: calculated checksum
1712  *
1713  *  Performs checksum calculation and validates the EEPROM checksum.  If the
1714  *  caller does not need checksum_val, the value can be NULL.
1715  **/
1716 int32_t ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
1717 					       uint16_t *checksum_val)
1718 {
1719 	int32_t status;
1720 	uint16_t checksum;
1721 	uint16_t read_checksum = 0;
1722 
1723 	DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
1724 
1725 	/* Read the first word from the EEPROM. If this times out or fails, do
1726 	 * not continue or we could be in for a very long wait while every
1727 	 * EEPROM read fails
1728 	 */
1729 	status = hw->eeprom.ops.read(hw, 0, &checksum);
1730 	if (status) {
1731 		DEBUGOUT("EEPROM read failed\n");
1732 		return status;
1733 	}
1734 
1735 	status = hw->eeprom.ops.calc_checksum(hw);
1736 	if (status < 0)
1737 		return status;
1738 
1739 	checksum = (uint16_t)(status & 0xffff);
1740 
1741 	status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
1742 	if (status) {
1743 		DEBUGOUT("EEPROM read failed\n");
1744 		return status;
1745 	}
1746 
1747 	/* Verify read checksum from EEPROM is the same as
1748 	 * calculated checksum
1749 	 */
1750 	if (read_checksum != checksum)
1751 		status = IXGBE_ERR_EEPROM_CHECKSUM;
1752 
1753 	/* If the user cares, return the calculated checksum */
1754 	if (checksum_val)
1755 		*checksum_val = checksum;
1756 
1757 	return status;
1758 }
1759 
1760 /**
1761  *  ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
1762  *  @hw: pointer to hardware structure
1763  **/
1764 int32_t ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1765 {
1766 	int32_t status;
1767 	uint16_t checksum;
1768 
1769 	DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
1770 
1771 	/* Read the first word from the EEPROM. If this times out or fails, do
1772 	 * not continue or we could be in for a very long wait while every
1773 	 * EEPROM read fails
1774 	 */
1775 	status = hw->eeprom.ops.read(hw, 0, &checksum);
1776 	if (status) {
1777 		DEBUGOUT("EEPROM read failed\n");
1778 		return status;
1779 	}
1780 
1781 	status = hw->eeprom.ops.calc_checksum(hw);
1782 	if (status < 0)
1783 		return status;
1784 
1785 	checksum = (uint16_t)(status & 0xffff);
1786 
1787 	status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
1788 
1789 	return status;
1790 }
1791 
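/*
 * Illustrative usage sketch (not compiled): how a caller might pair the
 * validate and update helpers above.  The wrapper name is hypothetical, and
 * rewriting the checksum is only appropriate after a deliberate EEPROM edit.
 */
#if 0
static int32_t
example_refresh_eeprom_checksum(struct ixgbe_hw *hw)
{
	uint16_t checksum;
	int32_t status;

	status = ixgbe_validate_eeprom_checksum_generic(hw, &checksum);
	if (status == IXGBE_ERR_EEPROM_CHECKSUM)
		/* Stored word at IXGBE_EEPROM_CHECKSUM disagrees with the
		 * freshly calculated value; write the calculated one back. */
		status = ixgbe_update_eeprom_checksum_generic(hw);

	return status;
}
#endif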
1792 /**
1793  *  ixgbe_validate_mac_addr - Validate MAC address
1794  *  @mac_addr: pointer to MAC address.
1795  *
1796  *  Tests a MAC address to ensure it is a valid Individual Address
1797  **/
1798 int32_t ixgbe_validate_mac_addr(uint8_t *mac_addr)
1799 {
1800 	int32_t status = IXGBE_SUCCESS;
1801 
1802 	DEBUGFUNC("ixgbe_validate_mac_addr");
1803 
1804 	/* Make sure it is not a multicast address */
1805 	if (IXGBE_IS_MULTICAST(mac_addr)) {
1806 		DEBUGOUT("MAC address is multicast\n");
1807 		status = IXGBE_ERR_INVALID_MAC_ADDR;
1808 	/* Not a broadcast address */
1809 	} else if (IXGBE_IS_BROADCAST(mac_addr)) {
1810 		DEBUGOUT("MAC address is broadcast\n");
1811 		status = IXGBE_ERR_INVALID_MAC_ADDR;
1812 	/* Reject the zero address */
1813 	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
1814 		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
1815 		DEBUGOUT("MAC address is all zeros\n");
1816 		status = IXGBE_ERR_INVALID_MAC_ADDR;
1817 	}
1818 	return status;
1819 }
1820 
1821 /**
1822  *  ixgbe_set_rar_generic - Set Rx address register
1823  *  @hw: pointer to hardware structure
1824  *  @index: Receive address register to write
1825  *  @addr: Address to put into receive address register
1826  *  @vmdq: VMDq "set" or "pool" index
1827  *  @enable_addr: set flag that address is active
1828  *
1829  *  Puts an ethernet address into a receive address register.
1830  **/
1831 int32_t ixgbe_set_rar_generic(struct ixgbe_hw *hw, uint32_t index, uint8_t *addr,
1832 			      uint32_t vmdq, uint32_t enable_addr)
1833 {
1834 	uint32_t rar_low, rar_high;
1835 	uint32_t rar_entries = hw->mac.num_rar_entries;
1836 
1837 	DEBUGFUNC("ixgbe_set_rar_generic");
1838 
1839 	/* Make sure we are using a valid rar index range */
1840 	if (index >= rar_entries) {
1841 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
1842 			     "RAR index %d is out of range.\n", index);
1843 		return IXGBE_ERR_INVALID_ARGUMENT;
1844 	}
1845 
1846 	/* setup VMDq pool selection before this RAR gets enabled */
1847 	hw->mac.ops.set_vmdq(hw, index, vmdq);
1848 
1849 	/*
1850 	 * HW expects these in little endian so we reverse the byte
1851 	 * order from network order (big endian) to little endian
1852 	 */
1853 	rar_low = ((uint32_t)addr[0] |
1854 		   ((uint32_t)addr[1] << 8) |
1855 		   ((uint32_t)addr[2] << 16) |
1856 		   ((uint32_t)addr[3] << 24));
1857 	/*
1858 	 * Some parts put the VMDq setting in the extra RAH bits,
1859 	 * so save everything except the lower 16 bits that hold part
1860 	 * of the address and the address valid bit.
1861 	 */
1862 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1863 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1864 	rar_high |= ((uint32_t)addr[4] | ((uint32_t)addr[5] << 8));
1865 
1866 	if (enable_addr != 0)
1867 		rar_high |= IXGBE_RAH_AV;
1868 
1869 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
1870 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1871 
1872 	return IXGBE_SUCCESS;
1873 }
1874 
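/*
 * Worked example for the packing above (address chosen arbitrarily): for
 * 00:1b:21:aa:bb:cc the register contents become
 *   RAL       = 0xaa211b00  (addr[3]<<24 | addr[2]<<16 | addr[1]<<8 | addr[0])
 *   RAH[15:0] = 0xccbb      (addr[5]<<8  | addr[4])
 * with IXGBE_RAH_AV OR'd into RAH when enable_addr is non-zero.
 */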
1875 /**
1876  *  ixgbe_clear_rar_generic - Remove Rx address register
1877  *  @hw: pointer to hardware structure
1878  *  @index: Receive address register to write
1879  *
1880  *  Clears an ethernet address from a receive address register.
1881  **/
1882 int32_t ixgbe_clear_rar_generic(struct ixgbe_hw *hw, uint32_t index)
1883 {
1884 	uint32_t rar_high;
1885 	uint32_t rar_entries = hw->mac.num_rar_entries;
1886 
1887 	DEBUGFUNC("ixgbe_clear_rar_generic");
1888 
1889 	/* Make sure we are using a valid rar index range */
1890 	if (index >= rar_entries) {
1891 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
1892 			     "RAR index %d is out of range.\n", index);
1893 		return IXGBE_ERR_INVALID_ARGUMENT;
1894 	}
1895 
1896 	/*
1897 	 * Some parts put the VMDq setting in the extra RAH bits,
1898 	 * so save everything except the lower 16 bits that hold part
1899 	 * of the address and the address valid bit.
1900 	 */
1901 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1902 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1903 
1904 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
1905 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1906 
1907 	/* clear VMDq pool/queue selection for this RAR */
1908 	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
1909 
1910 	return IXGBE_SUCCESS;
1911 }
1912 
1913 /**
1914  *  ixgbe_init_rx_addrs_generic - Initializes receive address filters.
1915  *  @hw: pointer to hardware structure
1916  *
1917  *  Places the MAC address in receive address register 0 and clears the rest
1918  *  of the receive address registers. Clears the multicast table. Assumes
1919  *  the receiver is in reset when the routine is called.
1920  **/
1921 int32_t ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1922 {
1923 	uint32_t i;
1924 	uint32_t rar_entries = hw->mac.num_rar_entries;
1925 
1926 	DEBUGFUNC("ixgbe_init_rx_addrs_generic");
1927 
1928 	/*
1929 	 * If the current mac address is valid, assume it is a software override
1930 	 * to the permanent address.
1931 	 * Otherwise, use the permanent address from the eeprom.
1932 	 */
1933 	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
1934 	    IXGBE_ERR_INVALID_MAC_ADDR) {
1935 		/* Get the MAC address from the RAR0 for later reference */
1936 		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
1937 
1938 		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
1939 			  hw->mac.addr[0], hw->mac.addr[1],
1940 			  hw->mac.addr[2]);
1941 		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
1942 			  hw->mac.addr[4], hw->mac.addr[5]);
1943 	} else {
1944 		/* Setup the receive address. */
1945 		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
1946 		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
1947 			  hw->mac.addr[0], hw->mac.addr[1],
1948 			  hw->mac.addr[2]);
1949 		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
1950 			  hw->mac.addr[4], hw->mac.addr[5]);
1951 
1952 		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1953 
1954 		/* clear VMDq pool/queue selection for RAR 0 */
1955 		hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
1956 	}
1957 	hw->addr_ctrl.overflow_promisc = 0;
1958 
1959 	hw->addr_ctrl.rar_used_count = 1;
1960 
1961 	/* Zero out the other receive addresses. */
1962 	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
1963 	for (i = 1; i < rar_entries; i++) {
1964 		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
1965 		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
1966 	}
1967 
1968 	/* Clear the MTA */
1969 	hw->addr_ctrl.mta_in_use = 0;
1970 	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1971 
1972 	DEBUGOUT(" Clearing MTA\n");
1973 	for (i = 0; i < hw->mac.mcft_size; i++)
1974 		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
1975 
1976 	ixgbe_init_uta_tables(hw);
1977 
1978 	return IXGBE_SUCCESS;
1979 }
1980 
1981 /**
1982  *  ixgbe_add_uc_addr - Adds a secondary unicast address.
1983  *  @hw: pointer to hardware structure
1984  *  @addr: new address
1985  *  @addr: new address
 *  @vmdq: VMDq "set" or "pool" index
1986  *
1987  *  Adds it to an unused receive address register or goes into promiscuous mode.
1988 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, uint8_t *addr, uint32_t vmdq)
1989 {
1990 	uint32_t rar_entries = hw->mac.num_rar_entries;
1991 	uint32_t rar;
1992 
1993 	DEBUGFUNC("ixgbe_add_uc_addr");
1994 
1995 	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
1996 		  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
1997 
1998 	/*
1999 	 * Place this address in the RAR if there is room,
2000 	 * else put the controller into promiscuous mode
2001 	 */
2002 	if (hw->addr_ctrl.rar_used_count < rar_entries) {
2003 		rar = hw->addr_ctrl.rar_used_count;
2004 		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2005 		DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2006 		hw->addr_ctrl.rar_used_count++;
2007 	} else {
2008 		hw->addr_ctrl.overflow_promisc++;
2009 	}
2010 
2011 	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2012 }
2013 
2014 /**
2015  *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
2016  *  @hw: pointer to hardware structure
2017  *  @mc_addr: the multicast address
2018  *
2019  *  Extracts 12 bits from a multicast address to determine which
2020  *  bit-vector to set in the multicast table. The hardware uses 12 bits of
2021  *  incoming rx multicast addresses to determine the bit-vector to check in
2022  *  the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
2023  *  by the MO field of the MCSTCTRL. The MO field is set during initialization
2024  *  to mc_filter_type.
2025  **/
2026 int32_t ixgbe_mta_vector(struct ixgbe_hw *hw, uint8_t *mc_addr)
2027 {
2028 	uint32_t vector = 0;
2029 
2030 	DEBUGFUNC("ixgbe_mta_vector");
2031 
2032 	switch (hw->mac.mc_filter_type) {
2033 	case 0:   /* use bits [47:36] of the address */
2034 		vector = ((mc_addr[4] >> 4) | (((uint16_t)mc_addr[5]) << 4));
2035 		break;
2036 	case 1:   /* use bits [46:35] of the address */
2037 		vector = ((mc_addr[4] >> 3) | (((uint16_t)mc_addr[5]) << 5));
2038 		break;
2039 	case 2:   /* use bits [45:34] of the address */
2040 		vector = ((mc_addr[4] >> 2) | (((uint16_t)mc_addr[5]) << 6));
2041 		break;
2042 	case 3:   /* use bits [43:32] of the address */
2043 		vector = ((mc_addr[4]) | (((uint16_t)mc_addr[5]) << 8));
2044 		break;
2045 	default:  /* Invalid mc_filter_type */
2046 		DEBUGOUT("MC filter type param set incorrectly\n");
2047 		panic("incorrect multicast filter type");
2048 		break;
2049 	}
2050 
2051 	/* vector can only be 12-bits or boundary will be exceeded */
2052 	vector &= 0xFFF;
2053 	return vector;
2054 }
2055 
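/*
 * Worked example (filter type 0, i.e. bits [47:36]): for a multicast address
 * ending in ...:34:12, mc_addr[4] = 0x34 and mc_addr[5] = 0x12, so
 *   vector = (0x34 >> 4) | (0x12 << 4) = 0x003 | 0x120 = 0x123
 * which stays within the 12-bit limit enforced by the final mask.
 */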
2056 /**
2057  *  ixgbe_set_mta - Set bit-vector in multicast table
2058  *  @hw: pointer to hardware structure
2059  *  @mc_addr: Multicast address
2060  *
2061  *  Sets the bit-vector in the multicast table.
2062  **/
2063 void ixgbe_set_mta(struct ixgbe_hw *hw, uint8_t *mc_addr)
2064 {
2065 	uint32_t vector;
2066 	uint32_t vector_bit;
2067 	uint32_t vector_reg;
2068 
2069 	DEBUGFUNC("ixgbe_set_mta");
2070 
2071 	hw->addr_ctrl.mta_in_use++;
2072 
2073 	vector = ixgbe_mta_vector(hw, mc_addr);
2074 	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
2075 
2076 	/*
2077 	 * The MTA is a register array of 128 32-bit registers. It is treated
2078 	 * like an array of 4096 bits.  We want to set bit
2079 	 * BitArray[vector_value]. So we figure out what register the bit is
2080 	 * in, read it, OR in the new bit, then write back the new value.  The
2081 	 * register is determined by the upper 7 bits of the vector value and
2082 	 * the bit within that register is determined by the lower 5 bits of
2083 	 * the value.
2084 	 */
2085 	vector_reg = (vector >> 5) & 0x7F;
2086 	vector_bit = vector & 0x1F;
2087 	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
2088 }
2089 
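/*
 * Continuing the worked example from ixgbe_mta_vector(): a vector of 0x123
 * decomposes as
 *   vector_reg = (0x123 >> 5) & 0x7F = 9
 *   vector_bit =  0x123 & 0x1F       = 3
 * so bit 3 of mta_shadow[9] (and ultimately of MTA[9]) gets set.
 */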
2090 /**
2091  *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2092  *  @hw: pointer to hardware structure
2093  *  @mc_addr_list: the list of new multicast addresses
2094  *  @mc_addr_count: number of addresses
2095  *  @next: iterator function to walk the multicast address list
2096  *  @clear: flag, when set clears the table beforehand
2097  *
2098  *  When the clear flag is set, the given list replaces any existing list.
2099  *  Hashes the given addresses into the multicast table.
2100  **/
2101 int32_t ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, uint8_t *mc_addr_list,
2102 					  uint32_t mc_addr_count, ixgbe_mc_addr_itr next,
2103 					  bool clear)
2104 {
2105 	uint32_t i;
2106 	uint32_t vmdq;
2107 
2108 	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2109 
2110 	/*
2111 	 * Set the new number of MC addresses that we are being requested to
2112 	 * use.
2113 	 */
2114 	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2115 	hw->addr_ctrl.mta_in_use = 0;
2116 
2117 	/* Clear mta_shadow */
2118 	if (clear) {
2119 		DEBUGOUT(" Clearing MTA\n");
2120 		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2121 	}
2122 
2123 	/* Update mta_shadow */
2124 	for (i = 0; i < mc_addr_count; i++) {
2125 		DEBUGOUT(" Adding the multicast addresses:\n");
2126 		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
2127 	}
2128 
2129 	/* Enable mta */
2130 	for (i = 0; i < hw->mac.mcft_size; i++)
2131 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2132 				      hw->mac.mta_shadow[i]);
2133 
2134 	if (hw->addr_ctrl.mta_in_use > 0)
2135 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2136 				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2137 
2138 	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2139 	return IXGBE_SUCCESS;
2140 }
2141 
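/*
 * Illustrative sketch (not compiled): a minimal "next" iterator for a packed
 * array of 6-byte multicast addresses, matching the ixgbe_mc_addr_itr
 * callback used above.  The function name and flat-array layout are
 * assumptions; real callers supply whatever walks their own list format.
 */
#if 0
static uint8_t *
example_mc_array_itr(struct ixgbe_hw *hw, uint8_t **mc_addr_ptr, uint32_t *vmdq)
{
	uint8_t *addr = *mc_addr_ptr;

	*vmdq = 0;				/* use the default pool */
	*mc_addr_ptr = addr + ETHER_ADDR_LEN;	/* advance to the next entry */
	return addr;
}
#endif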
2142 /**
2143  *  ixgbe_enable_mc_generic - Enable multicast address in RAR
2144  *  @hw: pointer to hardware structure
2145  *
2146  *  Enables multicast address in RAR and the use of the multicast hash table.
2147  **/
2148 int32_t ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2149 {
2150 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2151 
2152 	DEBUGFUNC("ixgbe_enable_mc_generic");
2153 
2154 	if (a->mta_in_use > 0)
2155 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2156 				hw->mac.mc_filter_type);
2157 
2158 	return IXGBE_SUCCESS;
2159 }
2160 
2161 /**
2162  *  ixgbe_disable_mc_generic - Disable multicast address in RAR
2163  *  @hw: pointer to hardware structure
2164  *
2165  *  Disables multicast address in RAR and the use of the multicast hash table.
2166  **/
2167 int32_t ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2168 {
2169 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2170 
2171 	DEBUGFUNC("ixgbe_disable_mc_generic");
2172 
2173 	if (a->mta_in_use > 0)
2174 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2175 
2176 	return IXGBE_SUCCESS;
2177 }
2178 
2179 /**
2180  *  ixgbe_fc_enable_generic - Enable flow control
2181  *  @hw: pointer to hardware structure
2182  *
2183  *  Enable flow control according to the current settings.
2184  **/
2185 int32_t ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2186 {
2187 	int32_t ret_val = IXGBE_SUCCESS;
2188 	uint32_t mflcn_reg, fccfg_reg;
2189 	uint32_t reg;
2190 	uint32_t fcrtl, fcrth;
2191 	int i;
2192 
2193 	DEBUGFUNC("ixgbe_fc_enable_generic");
2194 
2195 	/* Validate the water mark configuration */
2196 	if (!hw->fc.pause_time) {
2197 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2198 		goto out;
2199 	}
2200 
2201 	/* Low water mark of zero causes XOFF floods */
2202 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2203 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2204 		    hw->fc.high_water[i]) {
2205 			if (!hw->fc.low_water[i] ||
2206 			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
2207 				DEBUGOUT("Invalid water mark configuration\n");
2208 				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2209 				goto out;
2210 			}
2211 		}
2212 	}
2213 
2214 	/* Negotiate the fc mode to use */
2215 	ixgbe_fc_autoneg(hw);
2216 
2217 	/* Disable any previous flow control settings */
2218 	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2219 	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
2220 
2221 	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2222 	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2223 
2224 	/*
2225 	 * The possible values of fc.current_mode are:
2226 	 * 0: Flow control is completely disabled
2227 	 * 1: Rx flow control is enabled (we can receive pause frames,
2228 	 *    but not send pause frames).
2229 	 * 2: Tx flow control is enabled (we can send pause frames but
2230 	 *    we do not support receiving pause frames).
2231 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2232 	 * other: Invalid.
2233 	 */
2234 	switch (hw->fc.current_mode) {
2235 	case ixgbe_fc_none:
2236 		/*
2237 		 * Flow control is disabled by software override or autoneg.
2238 		 * The code below will actually disable it in the HW.
2239 		 */
2240 		break;
2241 	case ixgbe_fc_rx_pause:
2242 		/*
2243 		 * Rx Flow control is enabled and Tx Flow control is
2244 		 * disabled by software override. Since there really
2245 		 * isn't a way to advertise that we are capable of RX
2246 		 * Pause ONLY, we will advertise that we support both
2247 		 * symmetric and asymmetric Rx PAUSE.  Later, we will
2248 		 * disable the adapter's ability to send PAUSE frames.
2249 		 */
2250 		mflcn_reg |= IXGBE_MFLCN_RFCE;
2251 		break;
2252 	case ixgbe_fc_tx_pause:
2253 		/*
2254 		 * Tx Flow control is enabled, and Rx Flow control is
2255 		 * disabled by software override.
2256 		 */
2257 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2258 		break;
2259 	case ixgbe_fc_full:
2260 		/* Flow control (both Rx and Tx) is enabled by SW override. */
2261 		mflcn_reg |= IXGBE_MFLCN_RFCE;
2262 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2263 		break;
2264 	default:
2265 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
2266 			     "Flow control param set incorrectly\n");
2267 		ret_val = IXGBE_ERR_CONFIG;
2268 		goto out;
2269 		break;
2270 	}
2271 
2272 	/* Set 802.3x based flow control settings. */
2273 	mflcn_reg |= IXGBE_MFLCN_DPF;
2274 	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2275 	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2276 
2277 
2278 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
2279 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2280 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2281 		    hw->fc.high_water[i]) {
2282 			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
2283 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2284 			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2285 		} else {
2286 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
2287 			/*
2288 			 * In order to prevent Tx hangs when the internal Tx
2289 			 * switch is enabled we must set the high water mark
2290 			 * to the Rx packet buffer size - 24KB.  This allows
2291 			 * the Tx switch to function even under heavy Rx
2292 			 * workloads.
2293 			 */
2294 			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 0x6000;
2295 		}
2296 
2297 		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
2298 	}
2299 
2300 	/* Configure pause time (2 TCs per register) */
2301 	reg = hw->fc.pause_time * 0x00010001;
2302 	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
2303 		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2304 
2305 	/* Configure flow control refresh threshold value */
2306 	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2307 
2308 out:
2309 	return ret_val;
2310 }
2311 
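/*
 * Worked example for the pause-time packing above: each FCTTV register
 * carries the timer for two traffic classes, so a pause_time of 0x0680
 * becomes 0x0680 * 0x00010001 = 0x06800680, i.e. the same 16-bit value
 * replicated into both halves of the register.
 */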
2312 /**
2313  *  ixgbe_negotiate_fc - Negotiate flow control
2314  *  @hw: pointer to hardware structure
2315  *  @adv_reg: flow control advertised settings
2316  *  @lp_reg: link partner's flow control settings
2317  *  @adv_sym: symmetric pause bit in advertisement
2318  *  @adv_asm: asymmetric pause bit in advertisement
2319  *  @lp_sym: symmetric pause bit in link partner advertisement
2320  *  @lp_asm: asymmetric pause bit in link partner advertisement
2321  *
2322  *  Find the intersection between advertised settings and link partner's
2323  *  advertised settings
2324  **/
2325 int32_t ixgbe_negotiate_fc(struct ixgbe_hw *hw, uint32_t adv_reg,
2326 			   uint32_t lp_reg, uint32_t adv_sym,
2327 			   uint32_t adv_asm, uint32_t lp_sym,
2328 			   uint32_t lp_asm)
2329 {
2330 	if (!adv_reg || !lp_reg) {
2331 		ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
2332 			     "Local or link partner's advertised flow control "
2333 			     "settings are NULL. Local: %x, link partner: %x\n",
2334 			     adv_reg, lp_reg);
2335 		return IXGBE_ERR_FC_NOT_NEGOTIATED;
2336 	}
2337 
2338 	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2339 		/*
2340 		 * Now we need to check if the user selected Rx ONLY
2341 		 * pause frames.  In this case, we had to advertise
2342 		 * FULL flow control because we could not advertise RX
2343 		 * ONLY. Hence, we must now check to see if we need to
2344 		 * turn OFF the TRANSMISSION of PAUSE frames.
2345 		 */
2346 		if (hw->fc.requested_mode == ixgbe_fc_full) {
2347 			hw->fc.current_mode = ixgbe_fc_full;
2348 			DEBUGOUT("Flow Control = FULL.\n");
2349 		} else {
2350 			hw->fc.current_mode = ixgbe_fc_rx_pause;
2351 			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2352 		}
2353 	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2354 		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2355 		hw->fc.current_mode = ixgbe_fc_tx_pause;
2356 		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2357 	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2358 		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2359 		hw->fc.current_mode = ixgbe_fc_rx_pause;
2360 		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2361 	} else {
2362 		hw->fc.current_mode = ixgbe_fc_none;
2363 		DEBUGOUT("Flow Control = NONE.\n");
2364 	}
2365 	return IXGBE_SUCCESS;
2366 }
2367 
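/*
 * Summary of the resolution above (SYM = symmetric pause bit, ASM =
 * asymmetric pause bit, as advertised locally and by the link partner):
 *
 *   local SYM | local ASM | lp SYM | lp ASM | resolved current_mode
 *   ----------+-----------+--------+--------+------------------------------
 *       1     |     -     |   1    |   -    | full (or rx_pause when only
 *             |           |        |        | rx_pause was requested)
 *       0     |     1     |   1    |   1    | tx_pause
 *       1     |     1     |   0    |   1    | rx_pause
 *          anything else                    | none
 */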
2368 /**
2369  *  ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2370  *  @hw: pointer to hardware structure
2371  *
2372  *  Enable flow control according to IEEE clause 37 on 1 gig fiber.
2373  **/
2374 int32_t ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2375 {
2376 	uint32_t pcs_anadv_reg, pcs_lpab_reg, linkstat;
2377 	int32_t ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2378 
2379 	/*
2380 	 * On multispeed fiber at 1g, bail out if
2381 	 * - link is up but AN did not complete, or if
2382 	 * - link is up and AN completed but timed out
2383 	 */
2384 
2385 	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2386 	if (!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) ||
2387 	    (linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT)) {
2388 		DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
2389 		goto out;
2390 	}
2391 
2392 	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2393 	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2394 
2395 	ret_val =  ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2396 				      pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2397 				      IXGBE_PCS1GANA_ASM_PAUSE,
2398 				      IXGBE_PCS1GANA_SYM_PAUSE,
2399 				      IXGBE_PCS1GANA_ASM_PAUSE);
2400 
2401 out:
2402 	return ret_val;
2403 }
2404 
2405 /**
2406  *  ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2407  *  @hw: pointer to hardware structure
2408  *
2409  *  Enable flow control according to IEEE clause 37.
2410  **/
2411 int32_t ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2412 {
2413 	uint32_t links2, anlp1_reg, autoc_reg, links;
2414 	int32_t ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2415 
2416 	/*
2417 	 * On backplane, bail out if
2418 	 * - backplane autoneg was not completed, or if
2419 	 * - we are 82599 and link partner is not AN enabled
2420 	 */
2421 	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2422 	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
2423 		DEBUGOUT("Auto-Negotiation did not complete\n");
2424 		goto out;
2425 	}
2426 
2427 	if (hw->mac.type == ixgbe_mac_82599EB) {
2428 		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2429 		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
2430 			DEBUGOUT("Link partner is not AN enabled\n");
2431 			goto out;
2432 		}
2433 	}
2434 	/*
2435 	 * Read the 10g AN autoc and LP ability registers and resolve
2436 	 * local flow control settings accordingly
2437 	 */
2438 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2439 	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2440 
2441 	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2442 		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2443 		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2444 
2445 out:
2446 	return ret_val;
2447 }
2448 
2449 /**
2450  *  ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2451  *  @hw: pointer to hardware structure
2452  *
2453  *  Enable flow control according to IEEE clause 37.
2454  **/
2455 int32_t ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2456 {
2457 	uint16_t technology_ability_reg = 0;
2458 	uint16_t lp_technology_ability_reg = 0;
2459 
2460 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2461 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2462 			     &technology_ability_reg);
2463 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
2464 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2465 			     &lp_technology_ability_reg);
2466 
2467 	return ixgbe_negotiate_fc(hw, (uint32_t)technology_ability_reg,
2468 				  (uint32_t)lp_technology_ability_reg,
2469 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2470 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2471 }
2472 
2473 /**
2474  *  ixgbe_fc_autoneg - Configure flow control
2475  *  @hw: pointer to hardware structure
2476  *
2477  *  Compares our advertised flow control capabilities to those advertised by
2478  *  our link partner, and determines the proper flow control mode to use.
2479  **/
2480 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2481 {
2482 	int32_t ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2483 	ixgbe_link_speed speed;
2484 	bool link_up;
2485 
2486 	DEBUGFUNC("ixgbe_fc_autoneg");
2487 
2488 	/*
2489 	 * AN should have completed when the cable was plugged in.
2490 	 * Look for reasons to bail out.  Bail out if:
2491 	 * - FC autoneg is disabled, or if
2492 	 * - link is not up.
2493 	 */
2494 	if (hw->fc.disable_fc_autoneg) {
2495 		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
2496 			     "Flow control autoneg is disabled");
2497 		goto out;
2498 	}
2499 
2500 	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
2501 	if (!link_up) {
2502 		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
2503 		goto out;
2504 	}
2505 
2506 	switch (hw->phy.media_type) {
2507 	/* Autoneg flow control on fiber adapters */
2508 	case ixgbe_media_type_fiber_fixed:
2509 	case ixgbe_media_type_fiber_qsfp:
2510 	case ixgbe_media_type_fiber:
2511 		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
2512 			ret_val = ixgbe_fc_autoneg_fiber(hw);
2513 		break;
2514 
2515 	/* Autoneg flow control on backplane adapters */
2516 	case ixgbe_media_type_backplane:
2517 		ret_val = ixgbe_fc_autoneg_backplane(hw);
2518 		break;
2519 
2520 	/* Autoneg flow control on copper adapters */
2521 	case ixgbe_media_type_copper:
2522 		if (ixgbe_device_supports_autoneg_fc(hw))
2523 			ret_val = ixgbe_fc_autoneg_copper(hw);
2524 		break;
2525 
2526 	default:
2527 		break;
2528 	}
2529 
2530 out:
2531 	if (ret_val == IXGBE_SUCCESS) {
2532 		hw->fc.fc_was_autonegged = TRUE;
2533 	} else {
2534 		hw->fc.fc_was_autonegged = FALSE;
2535 		hw->fc.current_mode = hw->fc.requested_mode;
2536 	}
2537 }
2538 
2539 /*
2540  * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
2541  * @hw: pointer to hardware structure
2542  *
2543  * System-wide timeout range is encoded in PCIe Device Control2 register.
2544  *
2545  * Add 10% to specified maximum and return the number of times to poll for
2546  * completion timeout, in units of 100 microsec.  Never return less than
2547  * 800 = 80 millisec.
2548  */
2549 static uint32_t ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
2550 {
2551 	int16_t devctl2;
2552 	uint32_t pollcnt;
2553 
2554 	devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
2555 	devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
2556 
2557 	switch (devctl2) {
2558 	case IXGBE_PCIDEVCTRL2_65_130ms:
2559 		pollcnt = 1300;		/* 130 millisec */
2560 		break;
2561 	case IXGBE_PCIDEVCTRL2_260_520ms:
2562 		pollcnt = 5200;		/* 520 millisec */
2563 		break;
2564 	case IXGBE_PCIDEVCTRL2_1_2s:
2565 		pollcnt = 20000;	/* 2 sec */
2566 		break;
2567 	case IXGBE_PCIDEVCTRL2_4_8s:
2568 		pollcnt = 80000;	/* 8 sec */
2569 		break;
2570 	case IXGBE_PCIDEVCTRL2_17_34s:
2571 		pollcnt = 340000;	/* 34 sec */
2572 		break;
2573 	case IXGBE_PCIDEVCTRL2_50_100us:	/* 100 microsecs */
2574 	case IXGBE_PCIDEVCTRL2_1_2ms:		/* 2 millisecs */
2575 	case IXGBE_PCIDEVCTRL2_16_32ms:		/* 32 millisec */
2576 	case IXGBE_PCIDEVCTRL2_16_32ms_def:	/* 32 millisec default */
2577 	default:
2578 		pollcnt = 800;		/* 80 millisec minimum */
2579 		break;
2580 	}
2581 
2582 	/* add 10% to spec maximum */
2583 	return (pollcnt * 11) / 10;
2584 }
2585 
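/*
 * Worked example: a Device Control2 completion-timeout range of 260-520 ms
 * selects pollcnt = 5200, and the 10% margin yields (5200 * 11) / 10 = 5720
 * polls of 100 usec each, roughly 572 ms of total wait.
 */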
2586 /**
2587  *  ixgbe_disable_pcie_master - Disable PCI-express master access
2588  *  @hw: pointer to hardware structure
2589  *
2590  *  Disables PCI-Express master access and verifies there are no pending
2591  *  requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
2592  *  requests.  Returns IXGBE_ERR_MASTER_REQUESTS_PENDING if the master disable
2593  *  bit fails to stop outstanding master requests, else IXGBE_SUCCESS,
2594  *  signifying that master requests are disabled.
2595 int32_t ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2596 {
2597 	int32_t status = IXGBE_SUCCESS;
2598 	uint32_t i, poll;
2599 
2600 	DEBUGFUNC("ixgbe_disable_pcie_master");
2601 
2602 	/* Always set this bit to ensure any future transactions are blocked */
2603 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
2604 
2605 	/* Exit if master requests are blocked */
2606 	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2607 		goto out;
2608 
2609 	/* Poll for master request bit to clear */
2610 	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2611 		usec_delay(100);
2612 		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2613 			goto out;
2614 	}
2615 
2616 	/*
2617 	 * Two consecutive resets are required via CTRL.RST per datasheet
2618 	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
2619 	 * of this need.  The first reset prevents new master requests from
2620 	 * being issued by our device.  We then must wait 1usec or more for any
2621 	 * remaining completions from the PCIe bus to trickle in, and then reset
2622 	 * again to clear out any effects they may have had on our device.
2623 	 */
2624 	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
2625 	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2626 
2627 	if (hw->mac.type >= ixgbe_mac_X550)
2628 		goto out;
2629 
2630 	/*
2631 	 * Before proceeding, make sure that the PCIe block does not have
2632 	 * transactions pending.
2633 	 */
2634 	poll = ixgbe_pcie_timeout_poll(hw);
2635 	for (i = 0; i < poll; i++) {
2636 		usec_delay(100);
2637 		if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
2638 		    IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
2639 			goto out;
2640 	}
2641 
2642 	ERROR_REPORT1(IXGBE_ERROR_POLLING,
2643 		     "PCIe transaction pending bit also did not clear.\n");
2644 	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
2645 
2646 out:
2647 	return status;
2648 }
2649 
2650 /**
2651  *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
2652  *  @hw: pointer to hardware structure
2653  *  @mask: Mask to specify which semaphore to acquire
2654  *
2655  *  Acquires the SWFW semaphore through the GSSR register for the specified
2656  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
2657  **/
2658 int32_t ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, uint32_t mask)
2659 {
2660 	uint32_t gssr = 0;
2661 	uint32_t swmask = mask;
2662 	uint32_t fwmask = mask << 5;
2663 	uint32_t timeout = 200;
2664 	uint32_t i;
2665 
2666 	DEBUGFUNC("ixgbe_acquire_swfw_sync");
2667 
2668 	for (i = 0; i < timeout; i++) {
2669 		/*
2670 		 * SW NVM semaphore bit is used for access to all
2671 		 * SW_FW_SYNC bits (not just NVM)
2672 		 */
2673 		if (ixgbe_get_eeprom_semaphore(hw))
2674 			return IXGBE_ERR_SWFW_SYNC;
2675 
2676 		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2677 		if (!(gssr & (fwmask | swmask))) {
2678 			gssr |= swmask;
2679 			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2680 			ixgbe_release_eeprom_semaphore(hw);
2681 			return IXGBE_SUCCESS;
2682 		} else {
2683 			/* Resource is currently in use by FW or SW */
2684 			ixgbe_release_eeprom_semaphore(hw);
2685 			msec_delay(5);
2686 		}
2687 	}
2688 
2689 	/* If time expired clear the bits holding the lock and retry */
2690 	if (gssr & (fwmask | swmask))
2691 		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
2692 
2693 	msec_delay(5);
2694 	return IXGBE_ERR_SWFW_SYNC;
2695 }
2696 
2697 /**
2698  *  ixgbe_release_swfw_sync - Release SWFW semaphore
2699  *  @hw: pointer to hardware structure
2700  *  @mask: Mask to specify which semaphore to release
2701  *
2702  *  Releases the SWFW semaphore through the GSSR register for the specified
2703  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
2704  **/
2705 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, uint32_t mask)
2706 {
2707 	uint32_t gssr;
2708 	uint32_t swmask = mask;
2709 
2710 	DEBUGFUNC("ixgbe_release_swfw_sync");
2711 
2712 	ixgbe_get_eeprom_semaphore(hw);
2713 
2714 	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2715 	gssr &= ~swmask;
2716 	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2717 
2718 	ixgbe_release_eeprom_semaphore(hw);
2719 }
2720 
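/*
 * Illustrative usage sketch (not compiled): accesses to a resource shared
 * with firmware are bracketed by acquire/release of the matching GSSR mask.
 * The PHY access shown here is only an example of such a resource.
 */
#if 0
	if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_PHY0_SM) != IXGBE_SUCCESS)
		return IXGBE_ERR_SWFW_SYNC;
	/* ... read or write PHY registers here ... */
	ixgbe_release_swfw_sync(hw, IXGBE_GSSR_PHY0_SM);
#endif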
2721 /**
2722  *  ixgbe_disable_sec_rx_path_generic - Stops the receive data path
2723  *  @hw: pointer to hardware structure
2724  *
2725  *  Stops the receive data path and waits for the HW to internally empty
2726  *  the Rx security block
2727  **/
2728 int32_t ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
2729 {
2730 #define IXGBE_MAX_SECRX_POLL 40
2731 
2732 	int i;
2733 	int secrxreg;
2734 
2735 	DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
2736 
2737 
2738 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2739 	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
2740 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2741 	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
2742 		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
2743 		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
2744 			break;
2745 		else
2746 			/* Use interrupt-safe sleep just in case */
2747 			usec_delay(1000);
2748 	}
2749 
2750 	/* For informational purposes only */
2751 	if (i >= IXGBE_MAX_SECRX_POLL)
2752 		DEBUGOUT("Rx unit being enabled before security "
2753 			 "path fully disabled.  Continuing with init.\n");
2754 
2755 	return IXGBE_SUCCESS;
2756 }
2757 
2758 /**
2759  *  prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
2760  *  @hw: pointer to hardware structure
 *  @locked: set to FALSE on return; the default AUTOC read takes no SW/FW lock
2761  *  @reg_val: Value we read from AUTOC
2762  *
2763  *  The default case requires no protection so just do the register read.
2764  */
2765 int32_t prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked,
2766 				uint32_t *reg_val)
2767 {
2768 	*locked = FALSE;
2769 	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2770 	return IXGBE_SUCCESS;
2771 }
2772 
2773 /**
2774  * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
2775  * @hw: pointer to hardware structure
2776  * @reg_val: value to write to AUTOC
2777  * @locked: bool to indicate whether the SW/FW lock was already taken by
2778  *           previous read.
2779  *
2780  * The default case requires no protection so just do the register write.
2781  */
2782 int32_t prot_autoc_write_generic(struct ixgbe_hw *hw, uint32_t reg_val,
2783 				 bool locked)
2784 {
2785 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
2786 	return IXGBE_SUCCESS;
2787 }
2788 
2789 /**
2790  *  ixgbe_enable_sec_rx_path_generic - Enables the receive data path
2791  *  @hw: pointer to hardware structure
2792  *
2793  *  Enables the receive data path.
2794  **/
2795 int32_t ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
2796 {
2797 	int secrxreg;
2798 
2799 	DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
2800 
2801 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2802 	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
2803 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2804 	IXGBE_WRITE_FLUSH(hw);
2805 
2806 	return IXGBE_SUCCESS;
2807 }
2808 
2809 /**
2810  *  ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
2811  *  @hw: pointer to hardware structure
2812  *  @regval: register value to write to RXCTRL
2813  *
2814  *  Enables the Rx DMA unit
2815  **/
2816 int32_t ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, uint32_t regval)
2817 {
2818 	DEBUGFUNC("ixgbe_enable_rx_dma_generic");
2819 
2820 	if (regval & IXGBE_RXCTRL_RXEN)
2821 		ixgbe_enable_rx(hw);
2822 	else
2823 		ixgbe_disable_rx(hw);
2824 
2825 	return IXGBE_SUCCESS;
2826 }
2827 
2828 /**
2829  *  ixgbe_blink_led_start_generic - Blink LED based on index.
2830  *  @hw: pointer to hardware structure
2831  *  @index: led number to blink
2832  **/
2833 int32_t ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, uint32_t index)
2834 {
2835 	ixgbe_link_speed speed = 0;
2836 	bool link_up = 0;
2837 	uint32_t autoc_reg = 0;
2838 	uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2839 	int32_t ret_val = IXGBE_SUCCESS;
2840 	bool locked = FALSE;
2841 
2842 	DEBUGFUNC("ixgbe_blink_led_start_generic");
2843 
2844 	/*
2845 	 * Link must be up to auto-blink the LEDs;
2846 	 * Force it if link is down.
2847 	 */
2848 	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
2849 
2850 	if (!link_up) {
2851 		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
2852 		if (ret_val != IXGBE_SUCCESS)
2853 			goto out;
2854 
2855 		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2856 		autoc_reg |= IXGBE_AUTOC_FLU;
2857 
2858 		ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
2859 		if (ret_val != IXGBE_SUCCESS)
2860 			goto out;
2861 
2862 		IXGBE_WRITE_FLUSH(hw);
2863 		msec_delay(10);
2864 	}
2865 
2866 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
2867 	led_reg |= IXGBE_LED_BLINK(index);
2868 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2869 	IXGBE_WRITE_FLUSH(hw);
2870 
2871 out:
2872 	return ret_val;
2873 }
2874 
2875 /**
2876  *  ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
2877  *  @hw: pointer to hardware structure
2878  *  @index: led number to stop blinking
2879  **/
2880 int32_t ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, uint32_t index)
2881 {
2882 	uint32_t autoc_reg = 0;
2883 	uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2884 	int32_t ret_val = IXGBE_SUCCESS;
2885 	bool locked = FALSE;
2886 
2887 	DEBUGFUNC("ixgbe_blink_led_stop_generic");
2888 
2889 	ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
2890 	if (ret_val != IXGBE_SUCCESS)
2891 		goto out;
2892 
2893 	autoc_reg &= ~IXGBE_AUTOC_FLU;
2894 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2895 
2896 	ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
2897 	if (ret_val != IXGBE_SUCCESS)
2898 		goto out;
2899 
2900 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
2901 	led_reg &= ~IXGBE_LED_BLINK(index);
2902 	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
2903 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2904 	IXGBE_WRITE_FLUSH(hw);
2905 
2906 out:
2907 	return ret_val;
2908 }
2909 
2910 /**
2911  *  ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
2912  *  @hw: pointer to hardware structure
2913  *
2914  *  Read PCIe configuration space, and get the MSI-X vector count from
2915  *  the capabilities table.
2916  **/
2917 uint16_t ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
2918 {
2919 	uint16_t msix_count = 1;
2920 	uint16_t max_msix_count;
2921 	uint16_t pcie_offset;
2922 
2923 	switch (hw->mac.type) {
2924 	case ixgbe_mac_82598EB:
2925 		pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
2926 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
2927 		break;
2928 	case ixgbe_mac_82599EB:
2929 	case ixgbe_mac_X540:
2930 	case ixgbe_mac_X550:
2931 	case ixgbe_mac_X550EM_x:
2932 		pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
2933 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
2934 		break;
2935 	default:
2936 		return msix_count;
2937 	}
2938 
2939 	DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
2940 	msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
2941 	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
2942 
2943 	/* MSI-X count is zero-based in HW */
2944 	msix_count++;
2945 
2946 	if (msix_count > max_msix_count)
2947 		msix_count = max_msix_count;
2948 
2949 	return msix_count;
2950 }
2951 
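/*
 * Worked example: a raw MSI-X table-size field of 0x003F reads back as 63;
 * the increment for the zero-based encoding gives 64 vectors, which is then
 * clamped to max_msix_count for the MAC in use.
 */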
2952 /**
2953  *  ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
2954  *  @hw: pointer to hardware structure
2955  *  @addr: Address to put into receive address register
2956  *  @vmdq: VMDq pool to assign
2957  *
2958  *  Puts an ethernet address into a receive address register, or
2959  *  finds the rar that it is already in; adds to the pool list
2960  **/
2961 int32_t ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, uint8_t *addr, uint32_t vmdq)
2962 {
2963 	static const uint32_t NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
2964 	uint32_t first_empty_rar = NO_EMPTY_RAR_FOUND;
2965 	uint32_t rar;
2966 	uint32_t rar_low, rar_high;
2967 	uint32_t addr_low, addr_high;
2968 
2969 	DEBUGFUNC("ixgbe_insert_mac_addr_generic");
2970 
2971 	/* swap bytes for HW little endian */
2972 	addr_low  = addr[0] | (addr[1] << 8)
2973 			    | (addr[2] << 16)
2974 			    | (addr[3] << 24);
2975 	addr_high = addr[4] | (addr[5] << 8);
2976 
2977 	/*
2978 	 * Either find the mac_id in rar or find the first empty space.
2979 	 * rar_highwater points to just after the highest currently used
2980 	 * rar in order to shorten the search.  It grows when we add a new
2981 	 * rar to the top.
2982 	 */
2983 	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
2984 		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
2985 
2986 		if (((IXGBE_RAH_AV & rar_high) == 0)
2987 		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
2988 			first_empty_rar = rar;
2989 		} else if ((rar_high & 0xFFFF) == addr_high) {
2990 			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
2991 			if (rar_low == addr_low)
2992 				break;    /* found it already in the rars */
2993 		}
2994 	}
2995 
2996 	if (rar < hw->mac.rar_highwater) {
2997 		/* already there so just add to the pool bits */
2998 		ixgbe_set_vmdq(hw, rar, vmdq);
2999 	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
3000 		/* stick it into first empty RAR slot we found */
3001 		rar = first_empty_rar;
3002 		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3003 	} else if (rar == hw->mac.rar_highwater) {
3004 		/* add it to the top of the list and inc the highwater mark */
3005 		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3006 		hw->mac.rar_highwater++;
3007 	} else if (rar >= hw->mac.num_rar_entries) {
3008 		return IXGBE_ERR_INVALID_MAC_ADDR;
3009 	}
3010 
3011 	/*
3012 	 * If we found rar[0], make sure the default pool bit (we use pool 0)
3013 	 * remains cleared to be sure default pool packets will get delivered
3014 	 */
3015 	if (rar == 0)
3016 		ixgbe_clear_vmdq(hw, rar, 0);
3017 
3018 	return rar;
3019 }
3020 
3021 /**
3022  *  ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3023  *  @hw: pointer to hardware struct
3024  *  @rar: receive address register index to disassociate
3025  *  @vmdq: VMDq pool index to remove from the rar
3026  **/
3027 int32_t ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
3028 {
3029 	uint32_t mpsar_lo, mpsar_hi;
3030 	uint32_t rar_entries = hw->mac.num_rar_entries;
3031 
3032 	DEBUGFUNC("ixgbe_clear_vmdq_generic");
3033 
3034 	/* Make sure we are using a valid rar index range */
3035 	if (rar >= rar_entries) {
3036 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3037 			     "RAR index %d is out of range.\n", rar);
3038 		return IXGBE_ERR_INVALID_ARGUMENT;
3039 	}
3040 
3041 	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3042 	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3043 
3044 	if (!mpsar_lo && !mpsar_hi)
3045 		goto done;
3046 
3047 	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3048 		if (mpsar_lo) {
3049 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3050 			mpsar_lo = 0;
3051 		}
3052 		if (mpsar_hi) {
3053 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3054 			mpsar_hi = 0;
3055 		}
3056 	} else if (vmdq < 32) {
3057 		mpsar_lo &= ~(1 << vmdq);
3058 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3059 	} else {
3060 		mpsar_hi &= ~(1 << (vmdq - 32));
3061 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3062 	}
3063 
3064 	/* was that the last pool using this rar? */
3065 	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
3066 		hw->mac.ops.clear_rar(hw, rar);
3067 done:
3068 	return IXGBE_SUCCESS;
3069 }
3070 
3071 /**
3072  *  ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3073  *  @hw: pointer to hardware struct
3074  *  @rar: receive address register index to associate with a VMDq index
3075  *  @vmdq: VMDq pool index
3076  **/
3077 int32_t ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
3078 {
3079 	uint32_t mpsar;
3080 	uint32_t rar_entries = hw->mac.num_rar_entries;
3081 
3082 	DEBUGFUNC("ixgbe_set_vmdq_generic");
3083 
3084 	/* Make sure we are using a valid rar index range */
3085 	if (rar >= rar_entries) {
3086 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3087 			     "RAR index %d is out of range.\n", rar);
3088 		return IXGBE_ERR_INVALID_ARGUMENT;
3089 	}
3090 
3091 	if (vmdq < 32) {
3092 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3093 		mpsar |= 1 << vmdq;
3094 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3095 	} else {
3096 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3097 		mpsar |= 1 << (vmdq - 32);
3098 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3099 	}
3100 	return IXGBE_SUCCESS;
3101 }
3102 
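/*
 * Worked example for the pool-to-register mapping above: vmdq pool 5 sets
 * bit 5 of MPSAR_LO(rar), while pool 40 sets bit 40 - 32 = 8 of
 * MPSAR_HI(rar).
 */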
3103 /**
3104  *  ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3105  *  @hw: pointer to hardware structure
3106  **/
3107 int32_t ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3108 {
3109 	int i;
3110 
3111 	DEBUGFUNC("ixgbe_init_uta_tables_generic");
3112 	DEBUGOUT(" Clearing UTA\n");
3113 
3114 	for (i = 0; i < 128; i++)
3115 		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3116 
3117 	return IXGBE_SUCCESS;
3118 }
3119 
3120 /**
3121  *  ixgbe_find_vlvf_slot - find the VLAN id or the first empty slot
3122  *  @hw: pointer to hardware structure
3123  *  @vlan: VLAN id to write to VLAN filter
3124  *
3125  *  Returns the VLVF index where this VLAN id should be placed, or
 *  IXGBE_ERR_NO_SPACE if the VLVF table is full
3126  *
3127  **/
3128 int32_t ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, uint32_t vlan)
3129 {
3130 	uint32_t bits = 0;
3131 	uint32_t first_empty_slot = 0;
3132 	int32_t regindex;
3133 
3134 	/* short cut the special case */
3135 	if (vlan == 0)
3136 		return 0;
3137 
3138 	/*
3139 	 * Search for the vlan id in the VLVF entries. Save off the first empty
3140 	 * slot found along the way.
3141 	 */
3142 	for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
3143 		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3144 		if (!bits && !(first_empty_slot))
3145 			first_empty_slot = regindex;
3146 		else if ((bits & 0x0FFF) == vlan)
3147 			break;
3148 	}
3149 
3150 	/*
3151 	 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
3152 	 * in the VLVF. Else use the first empty VLVF register for this
3153 	 * vlan id.
3154 	 */
3155 	if (regindex >= IXGBE_VLVF_ENTRIES) {
3156 		if (first_empty_slot)
3157 			regindex = first_empty_slot;
3158 		else {
3159 			ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
3160 				     "No space in VLVF.\n");
3161 			regindex = IXGBE_ERR_NO_SPACE;
3162 		}
3163 	}
3164 
3165 	return regindex;
3166 }
3167 
3168 /**
3169  *  ixgbe_set_vfta_generic - Set VLAN filter table
3170  *  @hw: pointer to hardware structure
3171  *  @vlan: VLAN id to write to VLAN filter
3172  *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
3173  *  @vlan_on: boolean flag to turn on/off VLAN in VLVF
3174  *
3175  *  Turn on/off specified VLAN in the VLAN filter table.
3176  **/
3177 int32_t ixgbe_set_vfta_generic(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind,
3178 			       bool vlan_on)
3179 {
3180 	int32_t regindex;
3181 	uint32_t bitindex;
3182 	uint32_t vfta;
3183 	uint32_t targetbit;
3184 	int32_t ret_val = IXGBE_SUCCESS;
3185 	bool vfta_changed = FALSE;
3186 
3187 	DEBUGFUNC("ixgbe_set_vfta_generic");
3188 
3189 	if (vlan > 4095)
3190 		return IXGBE_ERR_PARAM;
3191 
3192 	/*
3193 	 * this is a 2 part operation - first the VFTA, then the
3194 	 * VLVF and VLVFB if VT Mode is set
3195 	 * We don't write the VFTA until we know the VLVF part succeeded.
3196 	 */
3197 
3198 	/* Part 1
3199 	 * The VFTA is a bitstring made up of 128 32-bit registers
3200 	 * that enable the particular VLAN id, much like the MTA:
3201 	 *    bits[11-5]: which register
3202 	 *    bits[4-0]:  which bit in the register
3203 	 */
3204 	regindex = (vlan >> 5) & 0x7F;
3205 	bitindex = vlan & 0x1F;
3206 	targetbit = (1 << bitindex);
3207 	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
3208 
3209 	if (vlan_on) {
3210 		if (!(vfta & targetbit)) {
3211 			vfta |= targetbit;
3212 			vfta_changed = TRUE;
3213 		}
3214 	} else {
3215 		if ((vfta & targetbit)) {
3216 			vfta &= ~targetbit;
3217 			vfta_changed = TRUE;
3218 		}
3219 	}
3220 
3221 	/* Part 2
3222 	 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
3223 	 */
3224 	ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
3225 					 &vfta_changed);
3226 	if (ret_val != IXGBE_SUCCESS)
3227 		return ret_val;
3228 
3229 	if (vfta_changed)
3230 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
3231 
3232 	return IXGBE_SUCCESS;
3233 }
3234 
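/*
 * Worked example for the VFTA decomposition above: VLAN id 100 (0x064)
 * gives regindex = 100 >> 5 = 3 and bitindex = 100 & 0x1F = 4, so the
 * filter bit lives in bit 4 of VFTA[3].
 */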
3235 /**
3236  *  ixgbe_set_vlvf_generic - Set VLAN Pool Filter
3237  *  @hw: pointer to hardware structure
3238  *  @vlan: VLAN id to write to VLAN filter
3239  *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
3240  *  @vlan_on: boolean flag to turn on/off VLAN in VLVF
3241  *  @vfta_changed: pointer to boolean flag which indicates whether VFTA
3242  *                 should be changed
3243  *
3244  *  Turn on/off specified bit in VLVF table.
3245  **/
3246 int32_t ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind,
3247 			       bool vlan_on, bool *vfta_changed)
3248 {
3249 	uint32_t vt;
3250 
3251 	DEBUGFUNC("ixgbe_set_vlvf_generic");
3252 
3253 	if (vlan > 4095)
3254 		return IXGBE_ERR_PARAM;
3255 
3256 	/* If VT Mode is set
3257 	 *   Either vlan_on
3258 	 *     make sure the vlan is in VLVF
3259 	 *     set the vind bit in the matching VLVFB
3260 	 *   Or !vlan_on
3261 	 *     clear the pool bit and possibly the vind
3262 	 */
3263 	vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3264 	if (vt & IXGBE_VT_CTL_VT_ENABLE) {
3265 		int32_t vlvf_index;
3266 		uint32_t bits;
3267 
3268 		vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
3269 		if (vlvf_index < 0)
3270 			return vlvf_index;
3271 
3272 		if (vlan_on) {
3273 			/* set the pool bit */
3274 			if (vind < 32) {
3275 				bits = IXGBE_READ_REG(hw,
3276 						IXGBE_VLVFB(vlvf_index * 2));
3277 				bits |= (1 << vind);
3278 				IXGBE_WRITE_REG(hw,
3279 						IXGBE_VLVFB(vlvf_index * 2),
3280 						bits);
3281 			} else {
3282 				bits = IXGBE_READ_REG(hw,
3283 					IXGBE_VLVFB((vlvf_index * 2) + 1));
3284 				bits |= (1 << (vind - 32));
3285 				IXGBE_WRITE_REG(hw,
3286 					IXGBE_VLVFB((vlvf_index * 2) + 1),
3287 					bits);
3288 			}
3289 		} else {
3290 			/* clear the pool bit */
3291 			if (vind < 32) {
3292 				bits = IXGBE_READ_REG(hw,
3293 						IXGBE_VLVFB(vlvf_index * 2));
3294 				bits &= ~(1 << vind);
3295 				IXGBE_WRITE_REG(hw,
3296 						IXGBE_VLVFB(vlvf_index * 2),
3297 						bits);
3298 				bits |= IXGBE_READ_REG(hw,
3299 					IXGBE_VLVFB((vlvf_index * 2) + 1));
3300 			} else {
3301 				bits = IXGBE_READ_REG(hw,
3302 					IXGBE_VLVFB((vlvf_index * 2) + 1));
3303 				bits &= ~(1 << (vind - 32));
3304 				IXGBE_WRITE_REG(hw,
3305 					IXGBE_VLVFB((vlvf_index * 2) + 1),
3306 					bits);
3307 				bits |= IXGBE_READ_REG(hw,
3308 						IXGBE_VLVFB(vlvf_index * 2));
3309 			}
3310 		}
3311 
3312 		/*
3313 		 * If there are still bits set in the VLVFB registers
3314 		 * for the VLAN ID indicated we need to see if the
3315 		 * caller is requesting that we clear the VFTA entry bit.
3316 		 * If the caller has requested that we clear the VFTA
3317 		 * entry bit but there are still pools/VFs using this VLAN
3318 		 * ID entry then ignore the request.  We're not worried
3319 		 * about the case where we're turning the VFTA VLAN ID
3320 		 * entry bit on, only when requested to turn it off as
3321 		 * there may be multiple pools and/or VFs using the
3322 		 * VLAN ID entry.  In that case we cannot clear the
3323 		 * VFTA bit until all pools/VFs using that VLAN ID have also
3324 		 * been cleared.  This will be indicated by "bits" being
3325 		 * zero.
3326 		 */
3327 		if (bits) {
3328 			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
3329 					(IXGBE_VLVF_VIEN | vlan));
3330 			if ((!vlan_on) && (vfta_changed != NULL)) {
3331 				/* someone wants to clear the vfta entry
3332 				 * but some pools/VFs are still using it.
3333 				 * Ignore it. */
3334 				*vfta_changed = FALSE;
3335 			}
3336 		} else
3337 			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
3338 	}
3339 
3340 	return IXGBE_SUCCESS;
3341 }
3342 
3343 /**
3344  *  ixgbe_clear_vfta_generic - Clear VLAN filter table
3345  *  @hw: pointer to hardware structure
3346  *
3347  *  Clears the VLAN filter table, and the VMDq index associated with the filter
3348  **/
3349 int32_t ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3350 {
3351 	uint32_t offset;
3352 
3353 	DEBUGFUNC("ixgbe_clear_vfta_generic");
3354 
3355 	for (offset = 0; offset < hw->mac.vft_size; offset++)
3356 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
3357 
3358 	for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3359 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
3360 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
3361 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
3362 	}
3363 
3364 	return IXGBE_SUCCESS;
3365 }
3366 
3367 /**
3368  *  ixgbe_check_mac_link_generic - Determine link and speed status
3369  *  @hw: pointer to hardware structure
3370  *  @speed: pointer to link speed
3371  *  @link_up: TRUE when link is up
3372  *  @link_up_wait_to_complete: bool used to wait for link up or not
3373  *
3374  *  Reads the links register to determine if link is up and the current speed
3375  **/
3376 int32_t ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3377 				     bool *link_up, bool link_up_wait_to_complete)
3378 {
3379 	uint32_t links_reg, links_orig;
3380 	uint32_t i;
3381 
3382 	DEBUGFUNC("ixgbe_check_mac_link_generic");
3383 
3384 	/* clear the old state */
3385 	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
3386 
3387 	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3388 
3389 	if (links_orig != links_reg) {
3390 		DEBUGOUT2("LINKS changed from %08X to %08X\n",
3391 			  links_orig, links_reg);
3392 	}
3393 
3394 	if (link_up_wait_to_complete) {
3395 		for (i = 0; i < hw->mac.max_link_up_time; i++) {
3396 			if (links_reg & IXGBE_LINKS_UP) {
3397 				*link_up = TRUE;
3398 				break;
3399 			} else {
3400 				*link_up = FALSE;
3401 			}
3402 			msec_delay(100);
3403 			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3404 		}
3405 	} else {
3406 		if (links_reg & IXGBE_LINKS_UP)
3407 			*link_up = TRUE;
3408 		else
3409 			*link_up = FALSE;
3410 	}
3411 
3412 	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
3413 	case IXGBE_LINKS_SPEED_10G_82599:
3414 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
3415 		if (hw->mac.type >= ixgbe_mac_X550) {
3416 			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
3417 				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
3418 		}
3419 		break;
3420 	case IXGBE_LINKS_SPEED_1G_82599:
3421 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
3422 		break;
3423 	case IXGBE_LINKS_SPEED_100_82599:
3424 		*speed = IXGBE_LINK_SPEED_100_FULL;
3425 		if (hw->mac.type >= ixgbe_mac_X550) {
3426 			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
3427 				*speed = IXGBE_LINK_SPEED_5GB_FULL;
3428 		}
3429 		break;
3430 	default:
3431 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
3432 	}
3433 
3434 	return IXGBE_SUCCESS;
3435 }
3436 
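/*
 * Illustrative usage sketch (not compiled): a one-shot link query that does
 * not wait for link-up to complete.
 */
#if 0
	ixgbe_link_speed speed;
	bool link_up;

	ixgbe_check_mac_link_generic(hw, &speed, &link_up, FALSE);
	if (link_up && speed == IXGBE_LINK_SPEED_10GB_FULL)
		printf("link up at 10Gb/s\n");
#endif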
3437 /**
3438  *  ixgbe_get_device_caps_generic - Get additional device capabilities
3439  *  @hw: pointer to hardware structure
3440  *  @device_caps: the EEPROM word with the extra device capabilities
3441  *
3442  *  This function will read the EEPROM location for the device capabilities,
3443  *  and return the word through device_caps.
3444  **/
3445 int32_t ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, uint16_t *device_caps)
3446 {
3447 	DEBUGFUNC("ixgbe_get_device_caps_generic");
3448 
3449 	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
3450 
3451 	return IXGBE_SUCCESS;
3452 }
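
/*
 * Illustrative sketch, not part of the driver: fetching the capabilities
 * word.  example_read_device_caps() is a hypothetical helper name.
 */
#if 0
static uint16_t
example_read_device_caps(struct ixgbe_hw *hw)
{
	uint16_t caps = 0;

	/* The EEPROM word is returned through the second argument. */
	ixgbe_get_device_caps_generic(hw, &caps);

	return caps;
}
#endif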
3453 
3454 /**
3455  *  ixgbe_host_interface_command - Issue command to manageability block
3456  *  @hw: pointer to the HW structure
3457  *  @buffer: contains the command to write and where the return status will
3458  *   be placed
3459  *  @length: length of buffer, must be multiple of 4 bytes
3460  *  @timeout: time in ms to wait for command completion
3461  *  @return_data: read and return data from the buffer (TRUE) or not (FALSE).
3462  *   This is needed because FW structures are big endian and their fields
3463  *   may be 8 or 16 bits wide depending on the command; decoding them would
3464  *   require a per-command table, so in those cases reading the data back
3465  *   is left to the caller.
3467  *
3468  *  Communicates with the manageability block.  On success return IXGBE_SUCCESS
3469  *  else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
3470  **/
3471 int32_t ixgbe_host_interface_command(struct ixgbe_hw *hw, uint32_t *buffer,
3472 				     uint32_t length, uint32_t timeout,
3473 				     bool return_data)
3474 {
3475 	uint32_t hicr, i, bi, fwsts;
3476 	uint32_t hdr_size = sizeof(struct ixgbe_hic_hdr);
3477 	uint16_t buf_len;
3478 	uint16_t dword_len;
3479 
3480 	DEBUGFUNC("ixgbe_host_interface_command");
3481 
3482 	if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
3483 		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
3484 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
3485 	}
3486 	/* Set bit 9 of FWSTS clearing FW reset indication */
3487 	fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
3488 	IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
3489 
3490 	/* Check that the host interface is enabled. */
3491 	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
3492 	if ((hicr & IXGBE_HICR_EN) == 0) {
3493 		DEBUGOUT("IXGBE_HICR_EN bit disabled.\n");
3494 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
3495 	}
3496 
3497 	/* Calculate length in DWORDs. We must be DWORD aligned */
3498 	if ((length % (sizeof(uint32_t))) != 0) {
3499 		DEBUGOUT("Buffer length failure, not aligned to dword\n");
3500 		return IXGBE_ERR_INVALID_ARGUMENT;
3501 	}
3502 
3503 	dword_len = length >> 2;
3504 
3505 	/* The device driver writes the relevant command block
3506 	 * into the ram area.
3507 	 */
3508 	for (i = 0; i < dword_len; i++)
3509 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
3510 				      i, htole32(buffer[i]));
3511 
3512 	/* Setting this bit tells the ARC that a new command is pending. */
3513 	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
3514 
3515 	for (i = 0; i < timeout; i++) {
3516 		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
3517 		if (!(hicr & IXGBE_HICR_C))
3518 			break;
3519 		msec_delay(1);
3520 	}
3521 
3522 	/* Check command completion */
3523 	if ((timeout != 0 && i == timeout) ||
3524 	    !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
3525 		DEBUGOUT("Command has failed with no status valid.\n");
3526 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
3527 	}
3528 
3529 	if (!return_data)
3530 		return 0;
3531 
3532 	/* Calculate length in DWORDs */
3533 	dword_len = hdr_size >> 2;
3534 
3535 	/* first pull in the header so we know the buffer length */
3536 	for (bi = 0; bi < dword_len; bi++) {
3537 		buffer[bi] = letoh32(IXGBE_READ_REG_ARRAY(hw,
3538 		    IXGBE_FLEX_MNG, bi));
3539 	}
3540 
3541 	/* If there is anything in the data position, pull it in */
3542 	buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
3543 	if (buf_len == 0)
3544 		return 0;
3545 
3546 	if (length < buf_len + hdr_size) {
3547 		DEBUGOUT("Buffer not large enough for reply message.\n");
3548 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
3549 	}
3550 
3551 	/* Calculate length in DWORDs, add 3 for odd lengths */
3552 	dword_len = (buf_len + 3) >> 2;
3553 
3554 	/* Pull in the rest of the buffer (bi is where we left off) */
3555 	for (; bi <= dword_len; bi++) {
3556 		buffer[bi] = letoh32(IXGBE_READ_REG_ARRAY(hw,
3557 		    IXGBE_FLEX_MNG, bi));
3558 	}
3559 
3560 	return 0;
3561 }
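
/*
 * Illustrative sketch, not part of the driver: a caller hands this routine
 * a dword-aligned buffer that starts with a struct ixgbe_hic_hdr and holds
 * the reply on return.  example_send_hic_command() and the 500 ms timeout
 * are hypothetical.
 */
#if 0
static int32_t
example_send_hic_command(struct ixgbe_hw *hw, uint32_t *cmd_buf,
    uint32_t cmd_len)
{
	/* The routine rejects zero-length or non-dword-aligned buffers. */
	if (cmd_len == 0 || (cmd_len % sizeof(uint32_t)) != 0)
		return IXGBE_ERR_INVALID_ARGUMENT;

	/* TRUE: copy the reply (header plus payload) back into cmd_buf. */
	return ixgbe_host_interface_command(hw, cmd_buf, cmd_len, 500, TRUE);
}
#endif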
3562 
3563 /**
3564  * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
3565  * @hw: pointer to the hardware structure
3566  *
3567  * The 82599 and X540 MACs can experience issues if TX work is still pending
3568  * when a reset occurs.  This function prevents this by flushing the PCIe
3569  * buffers on the system.
3570  **/
3571 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
3572 {
3573 	uint32_t gcr_ext, hlreg0, i, poll;
3574 	uint16_t value;
3575 
3576 	/*
3577 	 * If double reset is not requested then all transactions should
3578 	 * already be clear and as such there is no work to do
3579 	 */
3580 	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
3581 		return;
3582 
3583 	/*
3584 	 * Set loopback enable to prevent any transmits from being sent
3585 	 * should the link come up.  This assumes that the RXCTRL.RXEN bit
3586 	 * has already been cleared.
3587 	 */
3588 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3589 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
3590 
3591 	/* Wait for a last completion before clearing buffers */
3592 	IXGBE_WRITE_FLUSH(hw);
3593 	msec_delay(3);
3594 
3595 	/*
3596 	 * Before proceeding, make sure that the PCIe block does not have
3597 	 * transactions pending.
3598 	 */
3599 	poll = ixgbe_pcie_timeout_poll(hw);
3600 	for (i = 0; i < poll; i++) {
3601 		usec_delay(100);
3602 		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
3603 		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
3604 			goto out;
3605 	}
3606 
3607 out:
3608 	/* initiate cleaning flow for buffers in the PCIe transaction layer */
3609 	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
3610 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
3611 			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
3612 
3613 	/* Flush all writes and allow 20usec for all transactions to clear */
3614 	IXGBE_WRITE_FLUSH(hw);
3615 	usec_delay(20);
3616 
3617 	/* restore previous register values */
3618 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
3619 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3620 }
3621 
3622 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
3623 {
3624 	uint32_t rxctrl;
3625 
3626 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3627 	if (rxctrl & IXGBE_RXCTRL_RXEN) {
3628 		rxctrl &= ~IXGBE_RXCTRL_RXEN;
3629 		IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
3630 	}
3631 }
3632 
3633 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
3634 {
3635 	uint32_t rxctrl;
3636 
3637 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3638 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
3639 }
3640 
3641 /**
3642  * ixgbe_mng_present - returns TRUE when management capability is present
3643  * @hw: pointer to hardware structure
3644  */
3645 bool ixgbe_mng_present(struct ixgbe_hw *hw)
3646 {
3647 	uint32_t fwsm;
3648 
3649 	if (hw->mac.type < ixgbe_mac_82599EB)
3650 		return FALSE;
3651 
3652 	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3653 	fwsm &= IXGBE_FWSM_MODE_MASK;
3654 	return fwsm == IXGBE_FWSM_FW_MODE_PT;
3655 }
3656 
3657 /**
3658  * ixgbe_mng_enabled - Is the manageability engine enabled?
3659  * @hw: pointer to hardware structure
3660  *
3661  * Returns TRUE if the manageability engine is enabled.
3662  **/
3663 bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
3664 {
3665 	uint32_t fwsm, manc, factps;
3666 
3667 	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3668 	if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
3669 		return FALSE;
3670 
3671 	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
3672 	if (!(manc & IXGBE_MANC_RCV_TCO_EN))
3673 		return FALSE;
3674 
3675 	if (hw->mac.type <= ixgbe_mac_X540) {
3676 		factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
3677 		if (factps & IXGBE_FACTPS_MNGCG)
3678 			return FALSE;
3679 	}
3680 
3681 	return TRUE;
3682 }
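
/*
 * Illustrative sketch, not part of the driver: firmware commands are
 * typically gated on the two manageability checks above.
 * example_fw_available() is a hypothetical helper name.
 */
#if 0
static bool
example_fw_available(struct ixgbe_hw *hw)
{
	/* Require both pass-through FW mode and an enabled engine. */
	return ixgbe_mng_present(hw) && ixgbe_mng_enabled(hw);
}
#endif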
3683 
3684 /**
3685  *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
3686  *  @hw: pointer to hardware structure
3687  *  @speed: new link speed
3688  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
3689  *
3690  *  Set the link speed in the MAC and/or PHY register and restarts link.
3691  **/
3692 int32_t ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
3693 					      ixgbe_link_speed speed,
3694 					      bool autoneg_wait_to_complete)
3695 {
3696 	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
3697 	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
3698 	int32_t status = IXGBE_SUCCESS;
3699 	uint32_t speedcnt = 0;
3700 	uint32_t i = 0;
3701 	bool autoneg, link_up = FALSE;
3702 
3703 	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
3704 
3705 	/* Mask off requested but non-supported speeds */
3706 	if (!hw->mac.ops.get_link_capabilities)
3707 		return IXGBE_NOT_IMPLEMENTED;
3708 	status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg);
3709 	if (status != IXGBE_SUCCESS)
3710 		return status;
3711 
3712 	speed &= link_speed;
3713 
3714 	/* Try each speed one by one, highest priority first.  We do this in
3715 	 * software because 10Gb fiber doesn't support speed autonegotiation.
3716 	 */
3717 	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
3718 		speedcnt++;
3719 		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
3720 
3721 		/* If we already have link at this speed, just jump out */
3722 		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
3723 		if (status != IXGBE_SUCCESS)
3724 			return status;
3725 
3726 		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
3727 			goto out;
3728 
3729 		/* Set the module link speed */
3730 		switch (hw->phy.media_type) {
3731 		case ixgbe_media_type_fiber_fixed:
3732 		case ixgbe_media_type_fiber:
3733 			if (hw->mac.ops.set_rate_select_speed)
3734 				hw->mac.ops.set_rate_select_speed(hw,
3735 				    IXGBE_LINK_SPEED_10GB_FULL);
3736 			break;
3737 		case ixgbe_media_type_fiber_qsfp:
3738 			/* QSFP module automatically detects MAC link speed */
3739 			break;
3740 		default:
3741 			DEBUGOUT("Unexpected media type.\n");
3742 			break;
3743 		}
3744 
3745 		/* Allow module to change analog characteristics (1G->10G) */
3746 		msec_delay(40);
3747 
3748 		if (!hw->mac.ops.setup_mac_link)
3749 			return IXGBE_NOT_IMPLEMENTED;
3750 		status = hw->mac.ops.setup_mac_link(hw,
3751 						    IXGBE_LINK_SPEED_10GB_FULL,
3752 						    autoneg_wait_to_complete);
3753 		if (status != IXGBE_SUCCESS)
3754 			return status;
3755 
3756 		/* Flap the Tx laser if it has not already been done */
3757 		ixgbe_flap_tx_laser(hw);
3758 
3759 		/* Wait for the controller to acquire link.  Per IEEE 802.3ap,
3760 		 * Section 73.10.2, we may have to wait up to 500ms if KR is
3761 		 * attempted.  82599 uses the same timing for 10g SFI.
3762 		 */
3763 		for (i = 0; i < 5; i++) {
3764 			/* Wait for the link partner to also set speed */
3765 			msec_delay(100);
3766 
3767 			/* If we have link, just jump out */
3768 			status = ixgbe_check_link(hw, &link_speed,
3769 						  &link_up, FALSE);
3770 			if (status != IXGBE_SUCCESS)
3771 				return status;
3772 
3773 			if (link_up)
3774 				goto out;
3775 		}
3776 	}
3777 
3778 	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
3779 		speedcnt++;
3780 		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
3781 			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
3782 
3783 		/* If we already have link at this speed, just jump out */
3784 		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
3785 		if (status != IXGBE_SUCCESS)
3786 			return status;
3787 
3788 		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
3789 			goto out;
3790 
3791 		/* Set the module link speed */
3792 		switch (hw->phy.media_type) {
3793 		case ixgbe_media_type_fiber_fixed:
3794 		case ixgbe_media_type_fiber:
3795 			if (hw->mac.ops.set_rate_select_speed)
3796 				hw->mac.ops.set_rate_select_speed(hw,
3797 				    IXGBE_LINK_SPEED_1GB_FULL);
3798 			break;
3799 		case ixgbe_media_type_fiber_qsfp:
3800 			/* QSFP module automatically detects link speed */
3801 			break;
3802 		default:
3803 			DEBUGOUT("Unexpected media type.\n");
3804 			break;
3805 		}
3806 
3807 		/* Allow module to change analog characteristics (10G->1G) */
3808 		msec_delay(40);
3809 
3810 		if (!hw->mac.ops.setup_mac_link)
3811 			return IXGBE_NOT_IMPLEMENTED;
3812 		status = hw->mac.ops.setup_mac_link(hw,
3813 						    IXGBE_LINK_SPEED_1GB_FULL,
3814 						    autoneg_wait_to_complete);
3815 		if (status != IXGBE_SUCCESS)
3816 			return status;
3817 
3818 		/* Flap the Tx laser if it has not already been done */
3819 		ixgbe_flap_tx_laser(hw);
3820 
3821 		/* Wait for the link partner to also set speed */
3822 		msec_delay(100);
3823 
3824 		/* If we have link, just jump out */
3825 		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
3826 		if (status != IXGBE_SUCCESS)
3827 			return status;
3828 
3829 		if (link_up)
3830 			goto out;
3831 	}
3832 
3833 	/* We didn't get link.  Configure back to the highest speed we tried
3834 	 * (if there was more than one).  We call ourselves back with just the
3835 	 * single highest speed that the user requested.
3836 	 */
3837 	if (speedcnt > 1)
3838 		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
3839 						      highest_link_speed,
3840 						      autoneg_wait_to_complete);
3841 
3842 out:
3843 	/* Set autoneg_advertised value based on input link speed */
3844 	hw->phy.autoneg_advertised = 0;
3845 
3846 	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
3847 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
3848 
3849 	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
3850 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
3851 
3852 	return status;
3853 }
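
/*
 * Illustrative sketch, not part of the driver: requesting both fiber
 * speeds and letting the routine above fall back from 10G to 1G.
 * example_setup_fiber_link() is a hypothetical helper name.
 */
#if 0
static int32_t
example_setup_fiber_link(struct ixgbe_hw *hw)
{
	ixgbe_link_speed speed = IXGBE_LINK_SPEED_10GB_FULL |
	    IXGBE_LINK_SPEED_1GB_FULL;

	/* TRUE: wait for MAC autonegotiation at each attempted speed. */
	return ixgbe_setup_mac_link_multispeed_fiber(hw, speed, TRUE);
}
#endif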
3854 
3855 /**
3856  *  ixgbe_set_soft_rate_select_speed - Set module link speed
3857  *  @hw: pointer to hardware structure
3858  *  @speed: link speed to set
3859  *
3860  *  Set module link speed via the soft rate select.
3861  */
3862 void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
3863 					ixgbe_link_speed speed)
3864 {
3865 	int32_t status;
3866 	uint8_t rs, eeprom_data;
3867 
3868 	switch (speed) {
3869 	case IXGBE_LINK_SPEED_10GB_FULL:
3870 		/* one bit mask same as setting on */
3871 		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
3872 		break;
3873 	case IXGBE_LINK_SPEED_1GB_FULL:
3874 		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
3875 		break;
3876 	default:
3877 		DEBUGOUT("Invalid fixed module speed\n");
3878 		return;
3879 	}
3880 
3881 	/* Set RS0 */
3882 	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
3883 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
3884 					   &eeprom_data);
3885 	if (status) {
3886 		DEBUGOUT("Failed to read Rx Rate Select RS0\n");
3887 		goto out;
3888 	}
3889 
3890 	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
3891 
3892 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
3893 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
3894 					    eeprom_data);
3895 	if (status) {
3896 		DEBUGOUT("Failed to write Rx Rate Select RS0\n");
3897 		goto out;
3898 	}
3899 
3900 	/* Set RS1 */
3901 	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
3902 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
3903 					   &eeprom_data);
3904 	if (status) {
3905 		DEBUGOUT("Failed to read Rx Rate Select RS1\n");
3906 		goto out;
3907 	}
3908 
3909 	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
3910 
3911 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
3912 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
3913 					    eeprom_data);
3914 	if (status) {
3915 		DEBUGOUT("Failed to write Rx Rate Select RS1\n");
3916 		goto out;
3917 	}
3918 out:
3919 	return;
3920 }
3921 
3922 /* MAC Operations */
3923 
3924 /**
3925  *  ixgbe_init_shared_code - Initialize the shared code
3926  *  @hw: pointer to hardware structure
3927  *
3928  *  This will assign function pointers and assign the MAC type and PHY code.
3929  *  Does not touch the hardware. This function must be called prior to any
3930  *  other function in the shared code. The ixgbe_hw structure should be
3931  *  memset to 0 prior to calling this function.  The following fields in
3932  *  the hw structure must be filled in before the call:
3933  *  hw_addr, back, device_id, vendor_id, subsystem_device_id,
3934  *  subsystem_vendor_id, and revision_id
3935  **/
3936 int32_t ixgbe_init_shared_code(struct ixgbe_hw *hw)
3937 {
3938 	int32_t status;
3939 
3940 	DEBUGFUNC("ixgbe_init_shared_code");
3941 
3942 	/*
3943 	 * Set the mac type
3944 	 */
3945 	ixgbe_set_mac_type(hw);
3946 
3947 	switch (hw->mac.type) {
3948 	case ixgbe_mac_82598EB:
3949 		status = ixgbe_init_ops_82598(hw);
3950 		break;
3951 	case ixgbe_mac_82599EB:
3952 		status = ixgbe_init_ops_82599(hw);
3953 		break;
3954 	case ixgbe_mac_X540:
3955 		status = ixgbe_init_ops_X540(hw);
3956 		break;
3957 	case ixgbe_mac_X550:
3958 		status = ixgbe_init_ops_X550(hw);
3959 		break;
3960 	case ixgbe_mac_X550EM_x:
3961 		status = ixgbe_init_ops_X550EM(hw);
3962 		break;
3963 	default:
3964 		status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
3965 		break;
3966 	}
3967 	hw->mac.max_link_up_time = IXGBE_LINK_UP_TIME;
3968 
3969 	return status;
3970 }
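
/*
 * Illustrative sketch, not part of the driver: the attach-time ordering
 * described in the comment above.  The device/vendor IDs shown are only
 * examples and example_attach_hw() is a hypothetical helper name; the
 * real attach code also fills in hw_addr, the subsystem IDs and
 * revision_id from the bus glue.
 */
#if 0
static int32_t
example_attach_hw(struct ixgbe_hw *hw)
{
	/* The hw structure must be zeroed before the IDs are filled in. */
	memset(hw, 0, sizeof(*hw));
	hw->vendor_id = IXGBE_INTEL_VENDOR_ID;
	hw->device_id = IXGBE_DEV_ID_82599_SFP;

	/* Assigns the MAC type and the function pointer tables. */
	if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS)
		return IXGBE_ERR_DEVICE_NOT_SUPPORTED;

	/* Reset and start the hardware through the assigned ops. */
	return ixgbe_init_hw(hw);
}
#endif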
3971 
3972 /**
3973  *  ixgbe_set_mac_type - Sets MAC type
3974  *  @hw: pointer to the HW structure
3975  *
3976  *  This function sets the mac type of the adapter based on the
3977  *  vendor ID and device ID stored in the hw structure.
3978  **/
3979 int32_t ixgbe_set_mac_type(struct ixgbe_hw *hw)
3980 {
3981 	int32_t ret_val = IXGBE_SUCCESS;
3982 
3983 	DEBUGFUNC("ixgbe_set_mac_type");
3984 
3985 	if (hw->vendor_id != IXGBE_INTEL_VENDOR_ID)
3986 		return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
3987 
3988 	switch (hw->device_id) {
3989 	case IXGBE_DEV_ID_82598:
3990 	case IXGBE_DEV_ID_82598_BX:
3991 	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
3992 	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
3993 	case IXGBE_DEV_ID_82598AT:
3994 	case IXGBE_DEV_ID_82598AT2:
3995 	case IXGBE_DEV_ID_82598AT_DUAL_PORT:
3996 	case IXGBE_DEV_ID_82598EB_CX4:
3997 	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
3998 	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
3999 	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
4000 	case IXGBE_DEV_ID_82598EB_XF_LR:
4001 	case IXGBE_DEV_ID_82598EB_SFP_LOM:
4002 		hw->mac.type = ixgbe_mac_82598EB;
4003 		break;
4004 	case IXGBE_DEV_ID_82599_KX4:
4005 	case IXGBE_DEV_ID_82599_KX4_MEZZ:
4006 	case IXGBE_DEV_ID_82599_XAUI_LOM:
4007 	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
4008 	case IXGBE_DEV_ID_82599_KR:
4009 	case IXGBE_DEV_ID_82599_SFP:
4010 	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
4011 	case IXGBE_DEV_ID_82599_SFP_FCOE:
4012 	case IXGBE_DEV_ID_82599_SFP_EM:
4013 	case IXGBE_DEV_ID_82599_SFP_SF2:
4014 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
4015 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
4016 	case IXGBE_DEV_ID_82599EN_SFP:
4017 	case IXGBE_DEV_ID_82599_CX4:
4018 	case IXGBE_DEV_ID_82599_BYPASS:
4019 	case IXGBE_DEV_ID_82599_T3_LOM:
4020 		hw->mac.type = ixgbe_mac_82599EB;
4021 		break;
4022 	case IXGBE_DEV_ID_82599_VF:
4023 	case IXGBE_DEV_ID_82599_VF_HV:
4024 		hw->mac.type = ixgbe_mac_82599_vf;
4025 		break;
4026 	case IXGBE_DEV_ID_X540_VF:
4027 	case IXGBE_DEV_ID_X540_VF_HV:
4028 		hw->mac.type = ixgbe_mac_X540_vf;
4029 		break;
4030 	case IXGBE_DEV_ID_X540T:
4031 	case IXGBE_DEV_ID_X540T1:
4032 	case IXGBE_DEV_ID_X540_BYPASS:
4033 		hw->mac.type = ixgbe_mac_X540;
4034 		break;
4035 	case IXGBE_DEV_ID_X550T:
4036 	case IXGBE_DEV_ID_X550T1:
4037 		hw->mac.type = ixgbe_mac_X550;
4038 		break;
4039 	case IXGBE_DEV_ID_X550EM_X_KX4:
4040 	case IXGBE_DEV_ID_X550EM_X_KR:
4041 	case IXGBE_DEV_ID_X550EM_X_10G_T:
4042 	case IXGBE_DEV_ID_X550EM_X_1G_T:
4043 	case IXGBE_DEV_ID_X550EM_X_SFP:
4044 		hw->mac.type = ixgbe_mac_X550EM_x;
4045 		break;
4046 	case IXGBE_DEV_ID_X550_VF:
4047 	case IXGBE_DEV_ID_X550_VF_HV:
4048 		hw->mac.type = ixgbe_mac_X550_vf;
4049 		break;
4050 	case IXGBE_DEV_ID_X550EM_X_VF:
4051 	case IXGBE_DEV_ID_X550EM_X_VF_HV:
4052 		hw->mac.type = ixgbe_mac_X550EM_x_vf;
4053 		break;
4054 	default:
4055 		ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
4056 		break;
4057 	}
4058 
4059 	return ret_val;
4060 }
4061 
4062 /**
4063  *  ixgbe_init_hw - Initialize the hardware
4064  *  @hw: pointer to hardware structure
4065  *
4066  *  Initialize the hardware by resetting and then starting the hardware
4067  **/
4068 int32_t ixgbe_init_hw(struct ixgbe_hw *hw)
4069 {
4070 	if (hw->mac.ops.init_hw)
4071 		return hw->mac.ops.init_hw(hw);
4072 	else
4073 		return IXGBE_NOT_IMPLEMENTED;
4074 }
4075 
4076 /**
4077  *  ixgbe_get_media_type - Get media type
4078  *  @hw: pointer to hardware structure
4079  *
4080  *  Returns the media type (fiber, copper, backplane)
4081  **/
4082 enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw)
4083 {
4084 	if (hw->mac.ops.get_media_type)
4085 		return hw->mac.ops.get_media_type(hw);
4086 	else
4087 		return ixgbe_media_type_unknown;
4088 }
4089 
4090 /**
4091  *  ixgbe_identify_phy - Get PHY type
4092  *  @hw: pointer to hardware structure
4093  *
4094  *  Determines the physical layer module found on the current adapter.
4095  **/
4096 int32_t ixgbe_identify_phy(struct ixgbe_hw *hw)
4097 {
4098 	int32_t status = IXGBE_SUCCESS;
4099 
4100 	if (hw->phy.type == ixgbe_phy_unknown) {
4101 		if (hw->phy.ops.identify)
4102 			status = hw->phy.ops.identify(hw);
4103 		else
4104 			status = IXGBE_NOT_IMPLEMENTED;
4105 	}
4106 
4107 	return status;
4108 }
4109 
4110 /**
4111  *  ixgbe_check_link - Get link and speed status
4112  *  @hw: pointer to hardware structure
4113  *
4114  *  Reads the links register to determine if link is up and the current speed
4115  **/
4116 int32_t ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4117 			 bool *link_up, bool link_up_wait_to_complete)
4118 {
4119 	if (hw->mac.ops.check_link)
4120 		return hw->mac.ops.check_link(hw, speed, link_up,
4121 					      link_up_wait_to_complete);
4122 	else
4123 		return IXGBE_NOT_IMPLEMENTED;
4124 }
4125 
4126 /**
4127  *  ixgbe_flap_tx_laser - flap Tx laser to start autotry process
4128  *  @hw: pointer to hardware structure
4129  *
4130  *  When the driver changes the link speeds that it can support, it flaps
4131  *  the Tx laser to alert the link partner to restart the autotry process
4132  *  on its end.
4133  **/
4134 void ixgbe_flap_tx_laser(struct ixgbe_hw *hw)
4135 {
4136 	if (hw->mac.ops.flap_tx_laser)
4137 		hw->mac.ops.flap_tx_laser(hw);
4138 }
4139 
4140 /**
4141  *  ixgbe_set_rar - Set Rx address register
4142  *  @hw: pointer to hardware structure
4143  *  @index: Receive address register to write
4144  *  @addr: Address to put into receive address register
4145  *  @vmdq: VMDq "set"
4146  *  @enable_addr: set flag that address is active
4147  *
4148  *  Puts an ethernet address into a receive address register.
4149  **/
4150 int32_t ixgbe_set_rar(struct ixgbe_hw *hw, uint32_t index, uint8_t *addr,
4151 		      uint32_t vmdq, uint32_t enable_addr)
4152 {
4153 	if (hw->mac.ops.set_rar)
4154 		return hw->mac.ops.set_rar(hw, index, addr, vmdq, enable_addr);
4155 	else
4156 		return IXGBE_NOT_IMPLEMENTED;
4157 }
4158 
4159 /**
4160  *  ixgbe_set_vmdq - Associate a VMDq index with a receive address
4161  *  @hw: pointer to hardware structure
4162  *  @rar: receive address register index to associate with VMDq index
4163  *  @vmdq: VMDq set or pool index
4164  **/
4165 int32_t ixgbe_set_vmdq(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
4166 {
4167 	if (hw->mac.ops.set_vmdq)
4168 		return hw->mac.ops.set_vmdq(hw, rar, vmdq);
4169 	else
4170 		return IXGBE_NOT_IMPLEMENTED;
4171 }
4172 
4173 /**
4174  *  ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address
4175  *  @hw: pointer to hardware structure
4176  *  @rar: receive address register index to disassociate with VMDq index
4177  *  @vmdq: VMDq set or pool index
4178  **/
4179 int32_t ixgbe_clear_vmdq(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
4180 {
4181 	if (hw->mac.ops.clear_vmdq)
4182 		return hw->mac.ops.clear_vmdq(hw, rar, vmdq);
4183 	else
4184 		return IXGBE_NOT_IMPLEMENTED;
4185 }
4186 
4187 /**
4188  *  ixgbe_init_uta_tables - Initializes Unicast Table Arrays.
4189  *  @hw: pointer to hardware structure
4190  *
4191  *  Initializes the Unicast Table Arrays to zero on device load.  This
4192  *  is part of the Rx address initialization path.
4193  **/
4194 int32_t ixgbe_init_uta_tables(struct ixgbe_hw *hw)
4195 {
4196 	if (hw->mac.ops.init_uta_tables)
4197 		return hw->mac.ops.init_uta_tables(hw);
4198 	else
4199 		return IXGBE_NOT_IMPLEMENTED;
4200 }
4201 
4202 void ixgbe_disable_rx(struct ixgbe_hw *hw)
4203 {
4204 	if (hw->mac.ops.disable_rx)
4205 		hw->mac.ops.disable_rx(hw);
4206 }
4207 
4208 void ixgbe_enable_rx(struct ixgbe_hw *hw)
4209 {
4210 	if (hw->mac.ops.enable_rx)
4211 		hw->mac.ops.enable_rx(hw);
4212 }
4213 
4214 /*
4215  * MBX: Mailbox handling
4216  */
4217 
4218 /**
4219  *  ixgbe_read_mbx - Reads a message from the mailbox
4220  *  @hw: pointer to the HW structure
4221  *  @msg: The message buffer
4222  *  @size: Length of buffer
4223  *  @mbx_id: id of mailbox to read
4224  *
4225  *  returns SUCCESS if it successfully read the message from the buffer
4226  **/
4227 int32_t ixgbe_read_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16_t mbx_id)
4228 {
4229 	struct ixgbe_mbx_info *mbx = &hw->mbx;
4230 	int32_t ret_val = IXGBE_ERR_MBX;
4231 
4232 	DEBUGFUNC("ixgbe_read_mbx");
4233 
4234 	/* limit read to size of mailbox */
4235 	if (size > mbx->size)
4236 		size = mbx->size;
4237 
4238 	if (mbx->ops.read)
4239 		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
4240 
4241 	return ret_val;
4242 }
4243 
4244 /**
4245  *  ixgbe_write_mbx - Write a message to the mailbox
4246  *  @hw: pointer to the HW structure
4247  *  @msg: The message buffer
4248  *  @size: Length of buffer
4249  *  @mbx_id: id of mailbox to write
4250  *
4251  *  returns SUCCESS if it successfully copied message into the buffer
4252  **/
4253 int32_t ixgbe_write_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16_t mbx_id)
4254 {
4255 	struct ixgbe_mbx_info *mbx = &hw->mbx;
4256 	int32_t ret_val = IXGBE_SUCCESS;
4257 
4258 	DEBUGFUNC("ixgbe_write_mbx");
4259 
4260 	if (size > mbx->size)
4261 		ret_val = IXGBE_ERR_MBX;
4262 
4263 	else if (mbx->ops.write)
4264 		ret_val = mbx->ops.write(hw, msg, size, mbx_id);
4265 
4266 	return ret_val;
4267 }
4268 
4269 /**
4270  *  ixgbe_check_for_msg - checks to see if someone sent us mail
4271  *  @hw: pointer to the HW structure
4272  *  @mbx_id: id of mailbox to check
4273  *
4274  *  returns SUCCESS if the Status bit was found or else ERR_MBX
4275  **/
4276 int32_t ixgbe_check_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id)
4277 {
4278 	struct ixgbe_mbx_info *mbx = &hw->mbx;
4279 	int32_t ret_val = IXGBE_ERR_MBX;
4280 
4281 	DEBUGFUNC("ixgbe_check_for_msg");
4282 
4283 	if (mbx->ops.check_for_msg)
4284 		ret_val = mbx->ops.check_for_msg(hw, mbx_id);
4285 
4286 	return ret_val;
4287 }
4288 
4289 /**
4290  *  ixgbe_check_for_ack - checks to see if someone sent us ACK
4291  *  @hw: pointer to the HW structure
4292  *  @mbx_id: id of mailbox to check
4293  *
4294  *  returns SUCCESS if the Status bit was found or else ERR_MBX
4295  **/
4296 int32_t ixgbe_check_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id)
4297 {
4298 	struct ixgbe_mbx_info *mbx = &hw->mbx;
4299 	int32_t ret_val = IXGBE_ERR_MBX;
4300 
4301 	DEBUGFUNC("ixgbe_check_for_ack");
4302 
4303 	if (mbx->ops.check_for_ack)
4304 		ret_val = mbx->ops.check_for_ack(hw, mbx_id);
4305 
4306 	return ret_val;
4307 }
4308 
4309 /**
4310  *  ixgbe_check_for_rst - checks to see if other side has reset
4311  *  @hw: pointer to the HW structure
4312  *  @mbx_id: id of mailbox to check
4313  *
4314  *  returns SUCCESS if the Status bit was found or else ERR_MBX
4315  **/
4316 int32_t ixgbe_check_for_rst(struct ixgbe_hw *hw, uint16_t mbx_id)
4317 {
4318 	struct ixgbe_mbx_info *mbx = &hw->mbx;
4319 	int32_t ret_val = IXGBE_ERR_MBX;
4320 
4321 	DEBUGFUNC("ixgbe_check_for_rst");
4322 
4323 	if (mbx->ops.check_for_rst)
4324 		ret_val = mbx->ops.check_for_rst(hw, mbx_id);
4325 
4326 	return ret_val;
4327 }
4328 
4329 /**
4330  *  ixgbe_poll_for_msg - Wait for message notification
4331  *  @hw: pointer to the HW structure
4332  *  @mbx_id: id of mailbox to write
4333  *
4334  *  returns SUCCESS if it successfully received a message notification
4335  **/
4336 int32_t ixgbe_poll_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id)
4337 {
4338 	struct ixgbe_mbx_info *mbx = &hw->mbx;
4339 	int countdown = mbx->timeout;
4340 
4341 	DEBUGFUNC("ixgbe_poll_for_msg");
4342 
4343 	if (!countdown || !mbx->ops.check_for_msg)
4344 		goto out;
4345 
4346 	while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
4347 		countdown--;
4348 		if (!countdown)
4349 			break;
4350 		usec_delay(mbx->usec_delay);
4351 	}
4352 
4353 out:
4354 	return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
4355 }
4356 
4357 /**
4358  *  ixgbe_poll_for_ack - Wait for message acknowledgement
4359  *  @hw: pointer to the HW structure
4360  *  @mbx_id: id of mailbox to write
4361  *
4362  *  returns SUCCESS if it successfully received a message acknowledgement
4363  **/
4364 int32_t ixgbe_poll_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id)
4365 {
4366 	struct ixgbe_mbx_info *mbx = &hw->mbx;
4367 	int countdown = mbx->timeout;
4368 
4369 	DEBUGFUNC("ixgbe_poll_for_ack");
4370 
4371 	if (!countdown || !mbx->ops.check_for_ack)
4372 		goto out;
4373 
4374 	while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
4375 		countdown--;
4376 		if (!countdown)
4377 			break;
4378 		usec_delay(mbx->usec_delay);
4379 	}
4380 
4381 out:
4382 	return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
4383 }
4384 
4385 /**
4386  *  ixgbe_read_posted_mbx - Wait for message notification and receive message
4387  *  @hw: pointer to the HW structure
4388  *  @msg: The message buffer
4389  *  @size: Length of buffer
4390  *  @mbx_id: id of mailbox to write
4391  *
4392  *  returns SUCCESS if it successfully received a message notification and
4393  *  copied it into the receive buffer.
4394  **/
4395 int32_t ixgbe_read_posted_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16_t mbx_id)
4396 {
4397 	struct ixgbe_mbx_info *mbx = &hw->mbx;
4398 	int32_t ret_val = IXGBE_ERR_MBX;
4399 
4400 	DEBUGFUNC("ixgbe_read_posted_mbx");
4401 
4402 	if (!mbx->ops.read)
4403 		goto out;
4404 
4405 	ret_val = ixgbe_poll_for_msg(hw, mbx_id);
4406 
4407 	/* if a message was received, read it; otherwise we timed out */
4408 	if (!ret_val)
4409 		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
4410 out:
4411 	return ret_val;
4412 }
4413 
4414 /**
4415  *  ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
4416  *  @hw: pointer to the HW structure
4417  *  @msg: The message buffer
4418  *  @size: Length of buffer
4419  *  @mbx_id: id of mailbox to write
4420  *
4421  *  returns SUCCESS if it successfully copied message into the buffer and
4422  *  received an ack to that message within delay * timeout period
4423  **/
4424 int32_t ixgbe_write_posted_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
4425 			   uint16_t mbx_id)
4426 {
4427 	struct ixgbe_mbx_info *mbx = &hw->mbx;
4428 	int32_t ret_val = IXGBE_ERR_MBX;
4429 
4430 	DEBUGFUNC("ixgbe_write_posted_mbx");
4431 
4432 	/* exit if either we can't write or there isn't a defined timeout */
4433 	if (!mbx->ops.write || !mbx->timeout)
4434 		goto out;
4435 
4436 	/* send msg */
4437 	ret_val = mbx->ops.write(hw, msg, size, mbx_id);
4438 
4439 	/* if msg sent wait until we receive an ack */
4440 	if (!ret_val)
4441 		ret_val = ixgbe_poll_for_ack(hw, mbx_id);
4442 out:
4443 	return ret_val;
4444 }
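
/*
 * Illustrative sketch, not part of the driver: a posted request/response
 * exchange built from the two helpers above.  The message layout and
 * example_posted_exchange() are hypothetical; on the PF side mbx_id is
 * the VF index.
 */
#if 0
static int32_t
example_posted_exchange(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
    uint16_t mbx_id)
{
	int32_t ret_val;

	/* Write the request and wait for the peer to ACK it. */
	ret_val = ixgbe_write_posted_mbx(hw, msg, size, mbx_id);
	if (ret_val)
		return ret_val;

	/* Wait for the reply and copy it back into msg. */
	return ixgbe_read_posted_mbx(hw, msg, size, mbx_id);
}
#endif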
4445 
4446 /**
4447  *  ixgbe_init_mbx_ops_generic - Initialize MB function pointers
4448  *  @hw: pointer to the HW structure
4449  *
4450  *  Sets up the mailbox read and write message function pointers
4451  **/
4452 void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
4453 {
4454 	struct ixgbe_mbx_info *mbx = &hw->mbx;
4455 
4456 	mbx->ops.read_posted = ixgbe_read_posted_mbx;
4457 	mbx->ops.write_posted = ixgbe_write_posted_mbx;
4458 }
4459 
4460 /**
4461  *  ixgbe_read_v2p_mailbox - read v2p mailbox
4462  *  @hw: pointer to the HW structure
4463  *
4464  *  This function is used to read the v2p mailbox without losing the
4465  *  read-to-clear status bits.
4466  **/
4467 uint32_t ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw)
4468 {
4469 	uint32_t v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
4470 
4471 	v2p_mailbox |= hw->mbx.v2p_mailbox;
4472 	hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
4473 
4474 	return v2p_mailbox;
4475 }
4476 
4477 int32_t ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, uint32_t mask, int32_t index)
4478 {
4479 	uint32_t mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
4480 	int32_t ret_val = IXGBE_ERR_MBX;
4481 
4482 	if (mbvficr & mask) {
4483 		ret_val = IXGBE_SUCCESS;
4484 		IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
4485 	}
4486 
4487 	return ret_val;
4488 }
4489 
4490 /**
4491  *  ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
4492  *  @hw: pointer to the HW structure
4493  *  @vf_number: the VF index
4494  *
4495  *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
4496  **/
4497 int32_t ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, uint16_t vf_number)
4498 {
4499 	int32_t ret_val = IXGBE_ERR_MBX;
4500 	int32_t index = IXGBE_MBVFICR_INDEX(vf_number);
4501 	uint32_t vf_bit = vf_number % 16;
4502 
4503 	DEBUGFUNC("ixgbe_check_for_msg_pf");
4504 
4505 	if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
4506 				    index)) {
4507 		ret_val = IXGBE_SUCCESS;
4508 		hw->mbx.stats.reqs++;
4509 	}
4510 
4511 	return ret_val;
4512 }
4513 
4514 /**
4515  *  ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
4516  *  @hw: pointer to the HW structure
4517  *  @vf_number: the VF index
4518  *
4519  *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
4520  **/
4521 int32_t ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, uint16_t vf_number)
4522 {
4523 	int32_t ret_val = IXGBE_ERR_MBX;
4524 	int32_t index = IXGBE_MBVFICR_INDEX(vf_number);
4525 	uint32_t vf_bit = vf_number % 16;
4526 
4527 	DEBUGFUNC("ixgbe_check_for_ack_pf");
4528 
4529 	if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
4530 				    index)) {
4531 		ret_val = IXGBE_SUCCESS;
4532 		hw->mbx.stats.acks++;
4533 	}
4534 
4535 	return ret_val;
4536 }
4537 
4538 /**
4539  *  ixgbe_check_for_rst_pf - checks to see if the VF has reset
4540  *  @hw: pointer to the HW structure
4541  *  @vf_number: the VF index
4542  *
4543  *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
4544  **/
4545 int32_t ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, uint16_t vf_number)
4546 {
4547 	uint32_t reg_offset = (vf_number < 32) ? 0 : 1;
4548 	uint32_t vf_shift = vf_number % 32;
4549 	uint32_t vflre = 0;
4550 	int32_t ret_val = IXGBE_ERR_MBX;
4551 
4552 	DEBUGFUNC("ixgbe_check_for_rst_pf");
4553 
4554 	switch (hw->mac.type) {
4555 	case ixgbe_mac_82599EB:
4556 		vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
4557 		break;
4558 	case ixgbe_mac_X550:
4559 	case ixgbe_mac_X550EM_x:
4560 	case ixgbe_mac_X540:
4561 		vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
4562 		break;
4563 	default:
4564 		break;
4565 	}
4566 
4567 	if (vflre & (1 << vf_shift)) {
4568 		ret_val = IXGBE_SUCCESS;
4569 		IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
4570 		hw->mbx.stats.rsts++;
4571 	}
4572 
4573 	return ret_val;
4574 }
4575 
4576 /**
4577  *  ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
4578  *  @hw: pointer to the HW structure
4579  *  @vf_number: the VF index
4580  *
4581  *  return SUCCESS if we obtained the mailbox lock
4582  **/
4583 int32_t ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, uint16_t vf_number)
4584 {
4585 	int32_t ret_val = IXGBE_ERR_MBX;
4586 	uint32_t p2v_mailbox;
4587 
4588 	DEBUGFUNC("ixgbe_obtain_mbx_lock_pf");
4589 
4590 	/* Take ownership of the buffer */
4591 	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
4592 
4593 	/* reserve mailbox for vf use */
4594 	p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
4595 	if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
4596 		ret_val = IXGBE_SUCCESS;
4597 
4598 	return ret_val;
4599 }
4600 
4601 /**
4602  *  ixgbe_write_mbx_pf - Places a message in the mailbox
4603  *  @hw: pointer to the HW structure
4604  *  @msg: The message buffer
4605  *  @size: Length of buffer
4606  *  @vf_number: the VF index
4607  *
4608  *  returns SUCCESS if it successfully copied message into the buffer
4609  **/
4610 int32_t ixgbe_write_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
4611 			   uint16_t vf_number)
4612 {
4613 	int32_t ret_val;
4614 	uint16_t i;
4615 
4616 	DEBUGFUNC("ixgbe_write_mbx_pf");
4617 
4618 	/* lock the mailbox to prevent pf/vf race condition */
4619 	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
4620 	if (ret_val)
4621 		goto out_no_write;
4622 
4623 	/* flush msg and acks as we are overwriting the message buffer */
4624 	ixgbe_check_for_msg_pf(hw, vf_number);
4625 	ixgbe_check_for_ack_pf(hw, vf_number);
4626 
4627 	/* copy the caller specified message to the mailbox memory buffer */
4628 	for (i = 0; i < size; i++)
4629 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
4630 
4631 	/* Interrupt VF to tell it a message has been sent and release buffer */
4632 	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
4633 
4634 	/* update stats */
4635 	hw->mbx.stats.msgs_tx++;
4636 
4637 out_no_write:
4638 	return ret_val;
4639 
4640 }
4641 
4642 /**
4643  *  ixgbe_read_mbx_pf - Read a message from the mailbox
4644  *  @hw: pointer to the HW structure
4645  *  @msg: The message buffer
4646  *  @size: Length of buffer
4647  *  @vf_number: the VF index
4648  *
4649  *  This function copies a message from the mailbox buffer to the caller's
4650  *  memory buffer.  The presumption is that the caller knows that there was
4651  *  a message due to a VF request so no polling for message is needed.
4652  **/
4653 int32_t ixgbe_read_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
4654 			  uint16_t vf_number)
4655 {
4656 	int32_t ret_val;
4657 	uint16_t i;
4658 
4659 	DEBUGFUNC("ixgbe_read_mbx_pf");
4660 
4661 	/* lock the mailbox to prevent pf/vf race condition */
4662 	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
4663 	if (ret_val)
4664 		goto out_no_read;
4665 
4666 	/* copy the message from the mailbox memory buffer */
4667 	for (i = 0; i < size; i++)
4668 		msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
4669 
4670 	/* Acknowledge the message and release buffer */
4671 	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
4672 
4673 	/* update stats */
4674 	hw->mbx.stats.msgs_rx++;
4675 
4676 out_no_read:
4677 	return ret_val;
4678 }
4679 
4680 /**
4681  *  ixgbe_init_mbx_params_pf - set initial values for pf mailbox
4682  *  @hw: pointer to the HW structure
4683  *
4684  *  Initializes the hw->mbx struct to correct values for pf mailbox
4685  */
4686 void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
4687 {
4688 	struct ixgbe_mbx_info *mbx = &hw->mbx;
4689 
4690 	if (hw->mac.type != ixgbe_mac_82599EB &&
4691 	    hw->mac.type != ixgbe_mac_X550 &&
4692 	    hw->mac.type != ixgbe_mac_X550EM_x &&
4693 	    hw->mac.type != ixgbe_mac_X540)
4694 		return;
4695 
4696 	mbx->timeout = 0;
4697 	mbx->usec_delay = 0;
4698 
4699 	mbx->size = IXGBE_VFMAILBOX_SIZE;
4700 
4701 	mbx->ops.read = ixgbe_read_mbx_pf;
4702 	mbx->ops.write = ixgbe_write_mbx_pf;
4703 	mbx->ops.read_posted = ixgbe_read_posted_mbx;
4704 	mbx->ops.write_posted = ixgbe_write_posted_mbx;
4705 	mbx->ops.check_for_msg = ixgbe_check_for_msg_pf;
4706 	mbx->ops.check_for_ack = ixgbe_check_for_ack_pf;
4707 	mbx->ops.check_for_rst = ixgbe_check_for_rst_pf;
4708 
4709 	mbx->stats.msgs_tx = 0;
4710 	mbx->stats.msgs_rx = 0;
4711 	mbx->stats.reqs = 0;
4712 	mbx->stats.acks = 0;
4713 	mbx->stats.rsts = 0;
4714 }
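
/*
 * Illustrative sketch, not part of the driver: how the PF side might
 * service one VF mailbox once ixgbe_init_mbx_params_pf() has installed
 * the ops above.  example_service_vf_mbx() and the one-word reply are
 * hypothetical.
 */
#if 0
static void
example_service_vf_mbx(struct ixgbe_hw *hw, uint16_t vf_number)
{
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];

	/* Nothing to do unless this VF has raised a request. */
	if (ixgbe_check_for_msg(hw, vf_number) != IXGBE_SUCCESS)
		return;

	/* Pull the request out of the VF's mailbox memory. */
	if (ixgbe_read_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf_number))
		return;

	/* ... handle msg[0] and build a reply in msg ... */
	ixgbe_write_mbx(hw, msg, 1, vf_number);
}
#endif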
4715