xref: /openbsd/sys/dev/pci/ixgbe.c (revision 109b20f8)
1 /*	$OpenBSD: ixgbe.c,v 1.27 2022/01/27 18:28:45 bluhm Exp $	*/
2 
3 /******************************************************************************
4   SPDX-License-Identifier: BSD-3-Clause
5 
6   Copyright (c) 2001-2017, Intel Corporation
7   All rights reserved.
8 
9   Redistribution and use in source and binary forms, with or without
10   modification, are permitted provided that the following conditions are met:
11 
12    1. Redistributions of source code must retain the above copyright notice,
13       this list of conditions and the following disclaimer.
14 
15    2. Redistributions in binary form must reproduce the above copyright
16       notice, this list of conditions and the following disclaimer in the
17       documentation and/or other materials provided with the distribution.
18 
19    3. Neither the name of the Intel Corporation nor the names of its
20       contributors may be used to endorse or promote products derived from
21       this software without specific prior written permission.
22 
23   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
27   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33   POSSIBILITY OF SUCH DAMAGE.
34 
35 ******************************************************************************/
36 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_common.c 326022 2017-11-20 19:36:21Z pfg $*/
37 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_mbx.c 326022 2017-11-20 19:36:21Z pfg $*/
38 
39 #include <dev/pci/ixgbe.h>
40 #include <dev/pci/ixgbe_type.h>
41 
42 #ifdef __sparc64__
43 #include <dev/ofw/openfirm.h>
44 #endif
45 
46 void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw,
47 				       uint16_t link_status);
48 
49 int32_t ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
50 int32_t ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
51 void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
52 int32_t ixgbe_ready_eeprom(struct ixgbe_hw *hw);
53 void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
54 void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, uint16_t data,
55 				 uint16_t count);
56 uint16_t ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, uint16_t count);
57 void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec);
58 void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec);
59 void ixgbe_release_eeprom(struct ixgbe_hw *hw);
60 
61 int32_t ixgbe_mta_vector(struct ixgbe_hw *hw, uint8_t *mc_addr);
62 int32_t ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
63 int32_t ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
64 int32_t ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
65 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
66 
67 int32_t prot_autoc_read_generic(struct ixgbe_hw *, bool *, uint32_t *);
68 int32_t prot_autoc_write_generic(struct ixgbe_hw *, uint32_t, bool);
69 
70 /* MBX */
71 int32_t ixgbe_poll_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id);
72 int32_t ixgbe_poll_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id);
73 uint32_t ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw);
74 int32_t ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, uint32_t mask,
75 			       int32_t index);
76 int32_t ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, uint16_t vf_number);
77 int32_t ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, uint16_t vf_number);
78 int32_t ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, uint16_t vf_number);
79 int32_t ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, uint16_t vf_number);
80 int32_t ixgbe_write_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
81 			   uint16_t vf_number);
82 int32_t ixgbe_read_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
83 			  uint16_t vf_number);
84 
/* Empty token passed to IXGBE_MVALS_INIT for the base (no-suffix) layout. */
#define IXGBE_EMPTY_PARAM

/*
 * Per-MAC register value tables.  IXGBE_MVALS_INIT() expands once per
 * IXGBE_MVALS_IDX_* slot with the given device suffix appended, so each
 * table maps the generic register indices to the offsets/values used by
 * one MAC generation.  hw code selects the right table at attach time.
 */
static const uint32_t ixgbe_mvals_base[IXGBE_MVALS_IDX_LIMIT] = {
	IXGBE_MVALS_INIT(IXGBE_EMPTY_PARAM)
};

static const uint32_t ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = {
	IXGBE_MVALS_INIT(_X540)
};

static const uint32_t ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
	IXGBE_MVALS_INIT(_X550)
};

static const uint32_t ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = {
	IXGBE_MVALS_INIT(_X550EM_x)
};

static const uint32_t ixgbe_mvals_X550EM_a[IXGBE_MVALS_IDX_LIMIT] = {
	IXGBE_MVALS_INIT(_X550EM_a)
};
106 
107 /**
108  *  ixgbe_init_ops_generic - Inits function ptrs
109  *  @hw: pointer to the hardware structure
110  *
111  *  Initialize the function pointers.
112  **/
ixgbe_init_ops_generic(struct ixgbe_hw * hw)113 int32_t ixgbe_init_ops_generic(struct ixgbe_hw *hw)
114 {
115 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
116 	struct ixgbe_mac_info *mac = &hw->mac;
117 	uint32_t eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
118 
119 	DEBUGFUNC("ixgbe_init_ops_generic");
120 
121 	/* EEPROM */
122 	eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
123 	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
124 	if (eec & IXGBE_EEC_PRES)
125 		eeprom->ops.read = ixgbe_read_eerd_generic;
126 	else
127 		eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
128 	eeprom->ops.write = ixgbe_write_eeprom_generic;
129 	eeprom->ops.validate_checksum =
130 				      ixgbe_validate_eeprom_checksum_generic;
131 	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
132 	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;
133 
134 	/* MAC */
135 	mac->ops.init_hw = ixgbe_init_hw_generic;
136 	mac->ops.reset_hw = NULL;
137 	mac->ops.start_hw = ixgbe_start_hw_generic;
138 	mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
139 	mac->ops.get_media_type = NULL;
140 	mac->ops.get_supported_physical_layer = NULL;
141 	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
142 	mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
143 	mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
144 	mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
145 	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
146 	mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
147 	mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
148 	mac->ops.prot_autoc_read = prot_autoc_read_generic;
149 	mac->ops.prot_autoc_write = prot_autoc_write_generic;
150 
151 	/* LEDs */
152 	mac->ops.led_on = ixgbe_led_on_generic;
153 	mac->ops.led_off = ixgbe_led_off_generic;
154 	mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
155 	mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
156 
157 	/* RAR, Multicast, VLAN */
158 	mac->ops.set_rar = ixgbe_set_rar_generic;
159 	mac->ops.clear_rar = ixgbe_clear_rar_generic;
160 	mac->ops.insert_mac_addr = NULL;
161 	mac->ops.set_vmdq = NULL;
162 	mac->ops.clear_vmdq = NULL;
163 	mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
164 	mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
165 	mac->ops.enable_mc = ixgbe_enable_mc_generic;
166 	mac->ops.disable_mc = ixgbe_disable_mc_generic;
167 	mac->ops.clear_vfta = NULL;
168 	mac->ops.set_vfta = NULL;
169 	mac->ops.set_vlvf = NULL;
170 	mac->ops.init_uta_tables = NULL;
171 	mac->ops.enable_rx = ixgbe_enable_rx_generic;
172 	mac->ops.disable_rx = ixgbe_disable_rx_generic;
173 
174 	/* Flow Control */
175 	mac->ops.fc_enable = ixgbe_fc_enable_generic;
176 	mac->ops.setup_fc = ixgbe_setup_fc_generic;
177 	mac->ops.fc_autoneg = ixgbe_fc_autoneg;
178 
179 	/* Link */
180 	mac->ops.get_link_capabilities = NULL;
181 	mac->ops.setup_link = NULL;
182 	mac->ops.check_link = NULL;
183 	mac->ops.dmac_config = NULL;
184 	mac->ops.dmac_update_tcs = NULL;
185 	mac->ops.dmac_config_tcs = NULL;
186 
187 	return IXGBE_SUCCESS;
188 }
189 
190 /**
191  * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
192  * of flow control
193  * @hw: pointer to hardware structure
194  *
195  * This function returns TRUE if the device supports flow control
196  * autonegotiation, and FALSE if it does not.
197  *
198  **/
ixgbe_device_supports_autoneg_fc(struct ixgbe_hw * hw)199 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
200 {
201 	bool supported = FALSE;
202 	ixgbe_link_speed speed;
203 	bool link_up;
204 
205 	DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
206 
207 	switch (hw->phy.media_type) {
208 	case ixgbe_media_type_fiber_fixed:
209 	case ixgbe_media_type_fiber_qsfp:
210 	case ixgbe_media_type_fiber:
211 		/* flow control autoneg black list */
212 		switch (hw->device_id) {
213 		case IXGBE_DEV_ID_X550EM_A_SFP:
214 		case IXGBE_DEV_ID_X550EM_A_SFP_N:
215 		case IXGBE_DEV_ID_X550EM_A_QSFP:
216 		case IXGBE_DEV_ID_X550EM_A_QSFP_N:
217 			supported = FALSE;
218 			break;
219 		default:
220 			hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
221 			/* if link is down, assume supported */
222 			if (link_up)
223 				supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
224 				TRUE : FALSE;
225 			else
226 				supported = TRUE;
227 		}
228 
229 		break;
230 	case ixgbe_media_type_backplane:
231 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI)
232 			supported = FALSE;
233 		else
234 			supported = TRUE;
235 		break;
236 	case ixgbe_media_type_copper:
237 		/* only some copper devices support flow control autoneg */
238 		switch (hw->device_id) {
239 		case IXGBE_DEV_ID_82599_T3_LOM:
240 		case IXGBE_DEV_ID_X540T:
241 		case IXGBE_DEV_ID_X540T1:
242 		case IXGBE_DEV_ID_X540_BYPASS:
243 		case IXGBE_DEV_ID_X550T:
244 		case IXGBE_DEV_ID_X550T1:
245 		case IXGBE_DEV_ID_X550EM_X_10G_T:
246 		case IXGBE_DEV_ID_X550EM_A_10G_T:
247 		case IXGBE_DEV_ID_X550EM_A_1G_T:
248 		case IXGBE_DEV_ID_X550EM_A_1G_T_L:
249 			supported = TRUE;
250 			break;
251 		default:
252 			supported = FALSE;
253 		}
254 	default:
255 		break;
256 	}
257 
258 	if (!supported) {
259 		ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
260 		      "Device %x does not support flow control autoneg",
261 		      hw->device_id);
262 	}
263 
264 	return supported;
265 }
266 
/**
 *  ixgbe_setup_fc_generic - Set up flow control
 *  @hw: pointer to hardware structure
 *
 *  Called at init time to set up flow control.  Programs the 1G (PCS1GANA),
 *  10G backplane (AUTOC) and/or copper PHY autoneg advertisement registers
 *  according to hw->fc.requested_mode, then restarts autonegotiation where
 *  the media requires it.
 *
 *  Returns IXGBE_SUCCESS, IXGBE_ERR_INVALID_LINK_SETTINGS (rx_pause in
 *  strict IEEE mode), IXGBE_ERR_CONFIG (bad requested_mode), or an error
 *  propagated from the protected AUTOC read/write.
 **/
int32_t ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
{
	int32_t ret_val = IXGBE_SUCCESS;
	uint32_t reg = 0, reg_bp = 0;	/* PCS1GANA / AUTOC shadow copies */
	uint16_t reg_cu = 0;		/* copper PHY autoneg advertisement */
	bool locked = FALSE;		/* AUTOC RMW protection state */

	DEBUGFUNC("ixgbe_setup_fc");

	/* Validate the requested mode */
	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
			   "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/*
	 * 10gig parts do not have a word in the EEPROM to determine the
	 * default flow control setting, so we explicitly set it to full.
	 */
	if (hw->fc.requested_mode == ixgbe_fc_default)
		hw->fc.requested_mode = ixgbe_fc_full;

	/*
	 * Set up the 1G and 10G flow control advertisement registers so the
	 * HW will be able to do fc autoneg once the cable is plugged in.  If
	 * we link at 10G, the 1G advertisement is harmless and vice versa.
	 */
	switch (hw->phy.media_type) {
	case ixgbe_media_type_backplane:
		/* some MAC's need RMW protection on AUTOC */
		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
		if (ret_val != IXGBE_SUCCESS)
			goto out;

		/* only backplane uses autoc so fall through (intentional) */
	case ixgbe_media_type_fiber_fixed:
	case ixgbe_media_type_fiber_qsfp:
	case ixgbe_media_type_fiber:
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);

		break;
	case ixgbe_media_type_copper:
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
		break;
	default:
		break;
	}

	/*
	 * The possible values of fc.requested_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.requested_mode) {
	case ixgbe_fc_none:
		/* Flow control completely disabled by software override. */
		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
				    IXGBE_AUTOC_ASM_PAUSE);
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane) {
			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
			reg_cu |= IXGBE_TAF_ASM_PAUSE;
			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
		}
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE, as such we fall
		 * through to the fc_full statement.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
				  IXGBE_AUTOC_ASM_PAUSE;
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
		break;
	default:
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
			     "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;	/* not reached; kept for symmetry */
	}

	if (hw->mac.type < ixgbe_mac_X540) {
		/*
		 * Enable auto-negotiation between the MAC & PHY;
		 * the MAC will advertise clause 37 flow control.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);

		/* Disable AN timeout */
		if (hw->fc.strict_ieee)
			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;

		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
		DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
	}

	/*
	 * AUTOC restart handles negotiation of 1G and 10G on backplane
	 * and copper. There is no need to set the PCS1GCTL register.
	 *
	 */
	if (hw->phy.media_type == ixgbe_media_type_backplane) {
		reg_bp |= IXGBE_AUTOC_AN_RESTART;
		/* write back through the RMW-protected path, if locked */
		ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
		if (ret_val)
			goto out;
	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
		    (ixgbe_device_supports_autoneg_fc(hw))) {
		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
	}

	DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
out:
	return ret_val;
}
422 
423 /**
424  *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
425  *  @hw: pointer to hardware structure
426  *
427  *  Starts the hardware by filling the bus info structure and media type, clears
428  *  all on chip counters, initializes receive address registers, multicast
429  *  table, VLAN filter table, calls routine to set up link and flow control
430  *  settings, and leaves transmit and receive units disabled and uninitialized
431  **/
ixgbe_start_hw_generic(struct ixgbe_hw * hw)432 int32_t ixgbe_start_hw_generic(struct ixgbe_hw *hw)
433 {
434 	int32_t ret_val;
435 	uint32_t ctrl_ext;
436 	uint16_t device_caps;
437 
438 	DEBUGFUNC("ixgbe_start_hw_generic");
439 
440 	/* Set the media type */
441 	hw->phy.media_type = hw->mac.ops.get_media_type(hw);
442 
443 	/* PHY ops initialization must be done in reset_hw() */
444 
445 	/* Clear the VLAN filter table */
446 	hw->mac.ops.clear_vfta(hw);
447 
448 	/* Clear statistics registers */
449 	hw->mac.ops.clear_hw_cntrs(hw);
450 
451 	/* Set No Snoop Disable */
452 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
453 	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
454 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
455 	IXGBE_WRITE_FLUSH(hw);
456 
457 	/* Setup flow control */
458 	if (hw->mac.ops.setup_fc) {
459 		ret_val = hw->mac.ops.setup_fc(hw);
460 		if (ret_val != IXGBE_SUCCESS) {
461 			DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val);
462 			return ret_val;
463 		}
464 	}
465 
466 	/* Cache bit indicating need for crosstalk fix */
467 	switch (hw->mac.type) {
468 	case ixgbe_mac_82599EB:
469 	case ixgbe_mac_X550EM_x:
470 	case ixgbe_mac_X550EM_a:
471 		hw->mac.ops.get_device_caps(hw, &device_caps);
472 		if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
473 			hw->need_crosstalk_fix = FALSE;
474 		else
475 			hw->need_crosstalk_fix = TRUE;
476 		break;
477 	default:
478 		hw->need_crosstalk_fix = FALSE;
479 		break;
480 	}
481 
482 	/* Clear adapter stopped flag */
483 	hw->adapter_stopped = FALSE;
484 
485 	return IXGBE_SUCCESS;
486 }
487 
488 /**
489  *  ixgbe_start_hw_gen2 - Init sequence for common device family
490  *  @hw: pointer to hw structure
491  *
492  * Performs the init sequence common to the second generation
493  * of 10 GbE devices.
494  * Devices in the second generation:
495  *     82599
496  *     X540
497  **/
ixgbe_start_hw_gen2(struct ixgbe_hw * hw)498 int32_t ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
499 {
500 	uint32_t i;
501 	uint32_t regval;
502 
503 	/* Clear the rate limiters */
504 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
505 		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
506 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
507 	}
508 	IXGBE_WRITE_FLUSH(hw);
509 
510 	/* Disable relaxed ordering */
511 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
512 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
513 		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
514 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
515 	}
516 
517 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
518 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
519 		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
520 			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
521 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
522 	}
523 
524 	return IXGBE_SUCCESS;
525 }
526 
527 /**
528  *  ixgbe_init_hw_generic - Generic hardware initialization
529  *  @hw: pointer to hardware structure
530  *
531  *  Initialize the hardware by resetting the hardware, filling the bus info
532  *  structure and media type, clears all on chip counters, initializes receive
533  *  address registers, multicast table, VLAN filter table, calls routine to set
534  *  up link and flow control settings, and leaves transmit and receive units
535  *  disabled and uninitialized
536  **/
ixgbe_init_hw_generic(struct ixgbe_hw * hw)537 int32_t ixgbe_init_hw_generic(struct ixgbe_hw *hw)
538 {
539 	int32_t status;
540 
541 	DEBUGFUNC("ixgbe_init_hw_generic");
542 
543 	/* Reset the hardware */
544 	status = hw->mac.ops.reset_hw(hw);
545 
546 	if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) {
547 		/* Start the HW */
548 		status = hw->mac.ops.start_hw(hw);
549 	}
550 
551 	if (status != IXGBE_SUCCESS)
552 		DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);
553 
554 	return status;
555 }
556 
/**
 *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 *  @hw: pointer to hardware structure
 *
 *  Clears all hardware statistics counters by reading them from the hardware
 *  Statistics counters are clear on read.  The return values are discarded
 *  on purpose; the reads themselves reset the counters.
 **/
int32_t ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	uint16_t i = 0;	/* loop index; also reused as PHY read scratch below */

	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");

	/* Rx error counters */
	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);
	/* link flow control counters; 82599+ uses the *CNT Rx registers */
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* per-priority flow control counters */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	/* Rx size-bucket and total packet/byte counters */
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	/* management traffic counters */
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	/* Tx size-bucket counters */
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);
	/* per-queue counters; 82599+ splits byte counters into L/H halves */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	/* X540/X550 keep additional clear-on-read counters behind the PHY */
	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			ixgbe_identify_phy(hw);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
	}

	return IXGBE_SUCCESS;
}
670 
671 /**
672  *  ixgbe_get_mac_addr_generic - Generic get MAC address
673  *  @hw: pointer to hardware structure
674  *  @mac_addr: Adapter MAC address
675  *
676  *  Reads the adapter's MAC address from first Receive Address Register (RAR0)
677  *  A reset of the adapter must be performed prior to calling this function
678  *  in order for the MAC address to have been loaded from the EEPROM into RAR0
679  **/
ixgbe_get_mac_addr_generic(struct ixgbe_hw * hw,uint8_t * mac_addr)680 int32_t ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, uint8_t *mac_addr)
681 {
682 	uint32_t rar_high;
683 	uint32_t rar_low;
684 	uint16_t i;
685 
686 	DEBUGFUNC("ixgbe_get_mac_addr_generic");
687 
688 #ifdef __sparc64__
689 	struct ixgbe_osdep *os = hw->back;
690 
691 	if (OF_getprop(PCITAG_NODE(os->os_pa.pa_tag), "local-mac-address",
692 	    mac_addr, ETHER_ADDR_LEN) == ETHER_ADDR_LEN)
693 		return IXGBE_SUCCESS;
694 #endif
695 
696 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
697 	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
698 
699 	for (i = 0; i < 4; i++)
700 		mac_addr[i] = (uint8_t)(rar_low >> (i*8));
701 
702 	for (i = 0; i < 2; i++)
703 		mac_addr[i+4] = (uint8_t)(rar_high >> (i*8));
704 
705 	return IXGBE_SUCCESS;
706 }
707 
708 /**
709  *  ixgbe_set_pci_config_data_generic - Generic store PCI bus info
710  *  @hw: pointer to hardware structure
711  *  @link_status: the link status returned by the PCI config space
712  *
713  *  Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
714  **/
ixgbe_set_pci_config_data_generic(struct ixgbe_hw * hw,uint16_t link_status)715 void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw,
716 				       uint16_t link_status)
717 {
718 	struct ixgbe_mac_info *mac = &hw->mac;
719 
720 	if (hw->bus.type == ixgbe_bus_type_unknown)
721 		hw->bus.type = ixgbe_bus_type_pci_express;
722 
723 	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
724 	case IXGBE_PCI_LINK_WIDTH_1:
725 		hw->bus.width = ixgbe_bus_width_pcie_x1;
726 		break;
727 	case IXGBE_PCI_LINK_WIDTH_2:
728 		hw->bus.width = ixgbe_bus_width_pcie_x2;
729 		break;
730 	case IXGBE_PCI_LINK_WIDTH_4:
731 		hw->bus.width = ixgbe_bus_width_pcie_x4;
732 		break;
733 	case IXGBE_PCI_LINK_WIDTH_8:
734 		hw->bus.width = ixgbe_bus_width_pcie_x8;
735 		break;
736 	default:
737 		hw->bus.width = ixgbe_bus_width_unknown;
738 		break;
739 	}
740 
741 	switch (link_status & IXGBE_PCI_LINK_SPEED) {
742 	case IXGBE_PCI_LINK_SPEED_2500:
743 		hw->bus.speed = ixgbe_bus_speed_2500;
744 		break;
745 	case IXGBE_PCI_LINK_SPEED_5000:
746 		hw->bus.speed = ixgbe_bus_speed_5000;
747 		break;
748 	case IXGBE_PCI_LINK_SPEED_8000:
749 		hw->bus.speed = ixgbe_bus_speed_8000;
750 		break;
751 	default:
752 		hw->bus.speed = ixgbe_bus_speed_unknown;
753 		break;
754 	}
755 
756 	mac->ops.set_lan_id(hw);
757 }
758 
759 /**
760  *  ixgbe_get_bus_info_generic - Generic set PCI bus info
761  *  @hw: pointer to hardware structure
762  *
763  *  Gets the PCI bus info (speed, width, type) then calls helper function to
764  *  store this data within the ixgbe_hw structure.
765  **/
ixgbe_get_bus_info_generic(struct ixgbe_hw * hw)766 int32_t ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
767 {
768 	uint16_t link_status;
769 
770 	DEBUGFUNC("ixgbe_get_bus_info_generic");
771 
772 	/* Get the negotiated link width and speed from PCI config space */
773 	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
774 
775 	ixgbe_set_pci_config_data_generic(hw, link_status);
776 
777 	return IXGBE_SUCCESS;
778 }
779 
780 /**
781  *  ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
782  *  @hw: pointer to the HW structure
783  *
784  *  Determines the LAN function id by reading memory-mapped registers and swaps
785  *  the port value if requested, and set MAC instance for devices that share
786  *  CS4227.
787  **/
ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw * hw)788 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
789 {
790 	struct ixgbe_bus_info *bus = &hw->bus;
791 	uint32_t reg;
792 	uint16_t ee_ctrl_4;
793 
794 	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
795 
796 	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
797 	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
798 	bus->lan_id = bus->func;
799 
800 	/* check for a port swap */
801 	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
802 	if (reg & IXGBE_FACTPS_LFS)
803 		bus->func ^= 0x1;
804 
805 	/* Get MAC instance from EEPROM for configuring CS4227 */
806 	if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
807 		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
808 		bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
809 				   IXGBE_EE_CTRL_4_INST_ID_SHIFT;
810 	}
811 }
812 
/**
 *  ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
 *  @hw: pointer to hardware structure
 *
 *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 *  disables transmit and receive units. The adapter_stopped flag is used by
 *  the shared code and drivers to determine if the adapter is in a stopped
 *  state and should not touch the hardware.
 *
 *  The sequence (flag, Rx off, IRQ mask, queue flush, delay, PCIe master
 *  disable) is order-sensitive; do not reorder.
 *
 *  Returns the status of ixgbe_disable_pcie_master().
 **/
int32_t ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
	uint32_t reg_val;
	uint16_t i;

	DEBUGFUNC("ixgbe_stop_adapter_generic");

	/*
	 * Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = TRUE;

	/* Disable the receive unit */
	ixgbe_disable_rx(hw);

	/* Clear interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts, flush previous writes */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	for (i = 0; i < hw->mac.max_tx_queues; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);

	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		reg_val &= ~IXGBE_RXDCTL_ENABLE;
		reg_val |= IXGBE_RXDCTL_SWFLSH;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
	}

	/* flush all queues disables, then let in-flight DMA drain */
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(2);

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests
	 */
	return ixgbe_disable_pcie_master(hw);
}
866 
867 /**
868  *  ixgbe_led_on_generic - Turns on the software controllable LEDs.
869  *  @hw: pointer to hardware structure
870  *  @index: led number to turn on
871  **/
ixgbe_led_on_generic(struct ixgbe_hw * hw,uint32_t index)872 int32_t ixgbe_led_on_generic(struct ixgbe_hw *hw, uint32_t index)
873 {
874 	uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
875 
876 	DEBUGFUNC("ixgbe_led_on_generic");
877 
878 	if (index > 3)
879 		return IXGBE_ERR_PARAM;
880 
881 	/* To turn on the LED, set mode to ON. */
882 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
883 	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
884 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
885 	IXGBE_WRITE_FLUSH(hw);
886 
887 	return IXGBE_SUCCESS;
888 }
889 
890 /**
891  *  ixgbe_led_off_generic - Turns off the software controllable LEDs.
892  *  @hw: pointer to hardware structure
893  *  @index: led number to turn off
894  **/
ixgbe_led_off_generic(struct ixgbe_hw * hw,uint32_t index)895 int32_t ixgbe_led_off_generic(struct ixgbe_hw *hw, uint32_t index)
896 {
897 	uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
898 
899 	DEBUGFUNC("ixgbe_led_off_generic");
900 
901 	if (index > 3)
902 		return IXGBE_ERR_PARAM;
903 
904 	/* To turn off the LED, set mode to OFF. */
905 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
906 	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
907 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
908 	IXGBE_WRITE_FLUSH(hw);
909 
910 	return IXGBE_SUCCESS;
911 }
912 
913 /**
914  *  ixgbe_init_eeprom_params_generic - Initialize EEPROM params
915  *  @hw: pointer to hardware structure
916  *
917  *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
918  *  ixgbe_hw struct in order to set up EEPROM access.
919  **/
ixgbe_init_eeprom_params_generic(struct ixgbe_hw * hw)920 int32_t ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
921 {
922 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
923 	uint32_t eec;
924 	uint16_t eeprom_size;
925 
926 	DEBUGFUNC("ixgbe_init_eeprom_params_generic");
927 
928 	if (eeprom->type == ixgbe_eeprom_uninitialized) {
929 		eeprom->type = ixgbe_eeprom_none;
930 		/* Set default semaphore delay to 10ms which is a well
931 		 * tested value */
932 		eeprom->semaphore_delay = 10;
933 		/* Clear EEPROM page size, it will be initialized as needed */
934 		eeprom->word_page_size = 0;
935 
936 		/*
937 		 * Check for EEPROM present first.
938 		 * If not present leave as none
939 		 */
940 		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
941 		if (eec & IXGBE_EEC_PRES) {
942 			eeprom->type = ixgbe_eeprom_spi;
943 
944 			/*
945 			 * SPI EEPROM is assumed here.  This code would need to
946 			 * change if a future EEPROM is not SPI.
947 			 */
948 			eeprom_size = (uint16_t)((eec & IXGBE_EEC_SIZE) >>
949 					    IXGBE_EEC_SIZE_SHIFT);
950 			eeprom->word_size = 1 << (eeprom_size +
951 					     IXGBE_EEPROM_WORD_SIZE_SHIFT);
952 		}
953 
954 		if (eec & IXGBE_EEC_ADDR_SIZE)
955 			eeprom->address_bits = 16;
956 		else
957 			eeprom->address_bits = 8;
958 		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
959 			  "%d\n", eeprom->type, eeprom->word_size,
960 			  eeprom->address_bits);
961 	}
962 
963 	return IXGBE_SUCCESS;
964 }
965 
966 /**
967  *  ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
968  *  @hw: pointer to hardware structure
969  *  @offset: offset within the EEPROM to be written to
970  *  @words: number of word(s)
971  *  @data: 16 bit word(s) to be written to the EEPROM
972  *
973  *  If ixgbe_eeprom_update_checksum is not called after this function, the
974  *  EEPROM will most likely contain an invalid checksum.
975  **/
ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw * hw,uint16_t offset,uint16_t words,uint16_t * data)976 static int32_t ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, uint16_t offset,
977 					      uint16_t words, uint16_t *data)
978 {
979 	int32_t status;
980 	uint16_t word;
981 	uint16_t page_size;
982 	uint16_t i;
983 	uint8_t write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
984 
985 	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
986 
987 	/* Prepare the EEPROM for writing  */
988 	status = ixgbe_acquire_eeprom(hw);
989 
990 	if (status == IXGBE_SUCCESS) {
991 		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
992 			ixgbe_release_eeprom(hw);
993 			status = IXGBE_ERR_EEPROM;
994 		}
995 	}
996 
997 	if (status == IXGBE_SUCCESS) {
998 		for (i = 0; i < words; i++) {
999 			ixgbe_standby_eeprom(hw);
1000 
1001 			/*  Send the WRITE ENABLE command (8 bit opcode )  */
1002 			ixgbe_shift_out_eeprom_bits(hw,
1003 						   IXGBE_EEPROM_WREN_OPCODE_SPI,
1004 						   IXGBE_EEPROM_OPCODE_BITS);
1005 
1006 			ixgbe_standby_eeprom(hw);
1007 
1008 			/*
1009 			 * Some SPI eeproms use the 8th address bit embedded
1010 			 * in the opcode
1011 			 */
1012 			if ((hw->eeprom.address_bits == 8) &&
1013 			    ((offset + i) >= 128))
1014 				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1015 
1016 			/* Send the Write command (8-bit opcode + addr) */
1017 			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
1018 						    IXGBE_EEPROM_OPCODE_BITS);
1019 			ixgbe_shift_out_eeprom_bits(hw, (uint16_t)((offset + i) * 2),
1020 						    hw->eeprom.address_bits);
1021 
1022 			page_size = hw->eeprom.word_page_size;
1023 
1024 			/* Send the data in burst via SPI*/
1025 			do {
1026 				word = data[i];
1027 				word = (word >> 8) | (word << 8);
1028 				ixgbe_shift_out_eeprom_bits(hw, word, 16);
1029 
1030 				if (page_size == 0)
1031 					break;
1032 
1033 				/* do not wrap around page */
1034 				if (((offset + i) & (page_size - 1)) ==
1035 				    (page_size - 1))
1036 					break;
1037 			} while (++i < words);
1038 
1039 			ixgbe_standby_eeprom(hw);
1040 			msec_delay(10);
1041 		}
1042 		/* Done with writing - release the EEPROM */
1043 		ixgbe_release_eeprom(hw);
1044 	}
1045 
1046 	return status;
1047 }
1048 
1049 /**
1050  *  ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1051  *  @hw: pointer to hardware structure
1052  *  @offset: offset within the EEPROM to be written to
1053  *  @data: 16 bit word to be written to the EEPROM
1054  *
1055  *  If ixgbe_eeprom_update_checksum is not called after this function, the
1056  *  EEPROM will most likely contain an invalid checksum.
1057  **/
ixgbe_write_eeprom_generic(struct ixgbe_hw * hw,uint16_t offset,uint16_t data)1058 int32_t ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, uint16_t offset, uint16_t data)
1059 {
1060 	int32_t status;
1061 
1062 	DEBUGFUNC("ixgbe_write_eeprom_generic");
1063 
1064 	hw->eeprom.ops.init_params(hw);
1065 
1066 	if (offset >= hw->eeprom.word_size) {
1067 		status = IXGBE_ERR_EEPROM;
1068 		goto out;
1069 	}
1070 
1071 	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1072 
1073 out:
1074 	return status;
1075 }
1076 
1077 /**
1078  *  ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1079  *  @hw: pointer to hardware structure
1080  *  @offset: offset within the EEPROM to be read
1081  *  @words: number of word(s)
1082  *  @data: read 16 bit word(s) from EEPROM
1083  *
1084  *  Reads 16 bit word(s) from EEPROM through bit-bang method
1085  **/
ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw * hw,uint16_t offset,uint16_t words,uint16_t * data)1086 static int32_t ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, uint16_t offset,
1087 					     uint16_t words, uint16_t *data)
1088 {
1089 	int32_t status;
1090 	uint16_t word_in;
1091 	uint8_t read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1092 	uint16_t i;
1093 
1094 	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
1095 
1096 	/* Prepare the EEPROM for reading  */
1097 	status = ixgbe_acquire_eeprom(hw);
1098 
1099 	if (status == IXGBE_SUCCESS) {
1100 		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1101 			ixgbe_release_eeprom(hw);
1102 			status = IXGBE_ERR_EEPROM;
1103 		}
1104 	}
1105 
1106 	if (status == IXGBE_SUCCESS) {
1107 		for (i = 0; i < words; i++) {
1108 			ixgbe_standby_eeprom(hw);
1109 			/*
1110 			 * Some SPI eeproms use the 8th address bit embedded
1111 			 * in the opcode
1112 			 */
1113 			if ((hw->eeprom.address_bits == 8) &&
1114 			    ((offset + i) >= 128))
1115 				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1116 
1117 			/* Send the READ command (opcode + addr) */
1118 			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1119 						    IXGBE_EEPROM_OPCODE_BITS);
1120 			ixgbe_shift_out_eeprom_bits(hw, (uint16_t)((offset + i) * 2),
1121 						    hw->eeprom.address_bits);
1122 
1123 			/* Read the data. */
1124 			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
1125 			data[i] = (word_in >> 8) | (word_in << 8);
1126 		}
1127 
1128 		/* End this read operation */
1129 		ixgbe_release_eeprom(hw);
1130 	}
1131 
1132 	return status;
1133 }
1134 
1135 /**
1136  *  ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1137  *  @hw: pointer to hardware structure
1138  *  @offset: offset within the EEPROM to be read
1139  *  @data: read 16 bit value from EEPROM
1140  *
1141  *  Reads 16 bit value from EEPROM through bit-bang method
1142  **/
ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw * hw,uint16_t offset,uint16_t * data)1143 int32_t ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, uint16_t offset,
1144 				       uint16_t *data)
1145 {
1146 	int32_t status;
1147 
1148 	DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1149 
1150 	hw->eeprom.ops.init_params(hw);
1151 
1152 	if (offset >= hw->eeprom.word_size) {
1153 		status = IXGBE_ERR_EEPROM;
1154 		goto out;
1155 	}
1156 
1157 	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1158 
1159 out:
1160 	return status;
1161 }
1162 
1163 /**
1164  *  ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1165  *  @hw: pointer to hardware structure
1166  *  @offset: offset of word in the EEPROM to read
1167  *  @words: number of word(s)
1168  *  @data: 16 bit word(s) from the EEPROM
1169  *
1170  *  Reads a 16 bit word(s) from the EEPROM using the EERD register.
1171  **/
ixgbe_read_eerd_buffer_generic(struct ixgbe_hw * hw,uint16_t offset,uint16_t words,uint16_t * data)1172 int32_t ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, uint16_t offset,
1173 				   uint16_t words, uint16_t *data)
1174 {
1175 	uint32_t eerd;
1176 	int32_t status = IXGBE_SUCCESS;
1177 	uint32_t i;
1178 
1179 	DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1180 
1181 	hw->eeprom.ops.init_params(hw);
1182 
1183 	if (words == 0) {
1184 		status = IXGBE_ERR_INVALID_ARGUMENT;
1185 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1186 		goto out;
1187 	}
1188 
1189 	if (offset >= hw->eeprom.word_size) {
1190 		status = IXGBE_ERR_EEPROM;
1191 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1192 		goto out;
1193 	}
1194 
1195 	for (i = 0; i < words; i++) {
1196 		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1197 		       IXGBE_EEPROM_RW_REG_START;
1198 
1199 		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1200 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1201 
1202 		if (status == IXGBE_SUCCESS) {
1203 			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1204 				   IXGBE_EEPROM_RW_REG_DATA);
1205 		} else {
1206 			DEBUGOUT("Eeprom read timed out\n");
1207 			goto out;
1208 		}
1209 	}
1210 out:
1211 	return status;
1212 }
1213 
1214 /**
1215  *  ixgbe_read_eerd_generic - Read EEPROM word using EERD
1216  *  @hw: pointer to hardware structure
1217  *  @offset: offset of  word in the EEPROM to read
1218  *  @data: word read from the EEPROM
1219  *
1220  *  Reads a 16 bit word from the EEPROM using the EERD register.
1221  **/
ixgbe_read_eerd_generic(struct ixgbe_hw * hw,uint16_t offset,uint16_t * data)1222 int32_t ixgbe_read_eerd_generic(struct ixgbe_hw *hw, uint16_t offset, uint16_t *data)
1223 {
1224 	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1225 }
1226 
1227 /**
1228  *  ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1229  *  @hw: pointer to hardware structure
1230  *  @offset: offset of  word in the EEPROM to write
1231  *  @words: number of word(s)
1232  *  @data: word(s) write to the EEPROM
1233  *
1234  *  Write a 16 bit word(s) to the EEPROM using the EEWR register.
1235  **/
ixgbe_write_eewr_buffer_generic(struct ixgbe_hw * hw,uint16_t offset,uint16_t words,uint16_t * data)1236 int32_t ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, uint16_t offset,
1237 				    uint16_t words, uint16_t *data)
1238 {
1239 	uint32_t eewr;
1240 	int32_t status = IXGBE_SUCCESS;
1241 	uint16_t i;
1242 
1243 	DEBUGFUNC("ixgbe_write_eewr_generic");
1244 
1245 	hw->eeprom.ops.init_params(hw);
1246 
1247 	if (words == 0) {
1248 		status = IXGBE_ERR_INVALID_ARGUMENT;
1249 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1250 		goto out;
1251 	}
1252 
1253 	if (offset >= hw->eeprom.word_size) {
1254 		status = IXGBE_ERR_EEPROM;
1255 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1256 		goto out;
1257 	}
1258 
1259 	for (i = 0; i < words; i++) {
1260 		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1261 			(data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1262 			IXGBE_EEPROM_RW_REG_START;
1263 
1264 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1265 		if (status != IXGBE_SUCCESS) {
1266 			DEBUGOUT("Eeprom write EEWR timed out\n");
1267 			goto out;
1268 		}
1269 
1270 		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1271 
1272 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1273 		if (status != IXGBE_SUCCESS) {
1274 			DEBUGOUT("Eeprom write EEWR timed out\n");
1275 			goto out;
1276 		}
1277 	}
1278 
1279 out:
1280 	return status;
1281 }
1282 
1283 /**
1284  *  ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1285  *  @hw: pointer to hardware structure
1286  *  @offset: offset of  word in the EEPROM to write
1287  *  @data: word write to the EEPROM
1288  *
1289  *  Write a 16 bit word to the EEPROM using the EEWR register.
1290  **/
ixgbe_write_eewr_generic(struct ixgbe_hw * hw,uint16_t offset,uint16_t data)1291 int32_t ixgbe_write_eewr_generic(struct ixgbe_hw *hw, uint16_t offset, uint16_t data)
1292 {
1293 	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1294 }
1295 
1296 /**
1297  *  ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1298  *  @hw: pointer to hardware structure
1299  *  @ee_reg: EEPROM flag for polling
1300  *
1301  *  Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1302  *  read or write is done respectively.
1303  **/
ixgbe_poll_eerd_eewr_done(struct ixgbe_hw * hw,uint32_t ee_reg)1304 int32_t ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, uint32_t ee_reg)
1305 {
1306 	uint32_t i;
1307 	uint32_t reg;
1308 	int32_t status = IXGBE_ERR_EEPROM;
1309 
1310 	DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1311 
1312 	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1313 		if (ee_reg == IXGBE_NVM_POLL_READ)
1314 			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1315 		else
1316 			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1317 
1318 		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1319 			status = IXGBE_SUCCESS;
1320 			break;
1321 		}
1322 		usec_delay(5);
1323 	}
1324 
1325 	if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1326 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
1327 			     "EEPROM read/write done polling timed out");
1328 
1329 	return status;
1330 }
1331 
1332 /**
1333  *  ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1334  *  @hw: pointer to hardware structure
1335  *
1336  *  Prepares EEPROM for access using bit-bang method. This function should
1337  *  be called before issuing a command to the EEPROM.
1338  **/
ixgbe_acquire_eeprom(struct ixgbe_hw * hw)1339 int32_t ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1340 {
1341 	int32_t status = IXGBE_SUCCESS;
1342 	uint32_t eec;
1343 	uint32_t i;
1344 
1345 	DEBUGFUNC("ixgbe_acquire_eeprom");
1346 
1347 	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
1348 	    != IXGBE_SUCCESS)
1349 		status = IXGBE_ERR_SWFW_SYNC;
1350 
1351 	if (status == IXGBE_SUCCESS) {
1352 		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1353 
1354 		/* Request EEPROM Access */
1355 		eec |= IXGBE_EEC_REQ;
1356 		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1357 
1358 		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1359 			eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1360 			if (eec & IXGBE_EEC_GNT)
1361 				break;
1362 			usec_delay(5);
1363 		}
1364 
1365 		/* Release if grant not acquired */
1366 		if (!(eec & IXGBE_EEC_GNT)) {
1367 			eec &= ~IXGBE_EEC_REQ;
1368 			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1369 			DEBUGOUT("Could not acquire EEPROM grant\n");
1370 
1371 			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1372 			status = IXGBE_ERR_EEPROM;
1373 		}
1374 
1375 		/* Setup EEPROM for Read/Write */
1376 		if (status == IXGBE_SUCCESS) {
1377 			/* Clear CS and SK */
1378 			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1379 			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1380 			IXGBE_WRITE_FLUSH(hw);
1381 			usec_delay(1);
1382 		}
1383 	}
1384 	return status;
1385 }
1386 
1387 /**
1388  *  ixgbe_get_eeprom_semaphore - Get hardware semaphore
1389  *  @hw: pointer to hardware structure
1390  *
1391  *  Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1392  **/
ixgbe_get_eeprom_semaphore(struct ixgbe_hw * hw)1393 int32_t ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1394 {
1395 	int32_t status = IXGBE_ERR_EEPROM;
1396 	uint32_t timeout = 2000;
1397 	uint32_t i;
1398 	uint32_t swsm;
1399 
1400 	DEBUGFUNC("ixgbe_get_eeprom_semaphore");
1401 
1402 
1403 	/* Get SMBI software semaphore between device drivers first */
1404 	for (i = 0; i < timeout; i++) {
1405 		/*
1406 		 * If the SMBI bit is 0 when we read it, then the bit will be
1407 		 * set and we have the semaphore
1408 		 */
1409 		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1410 		if (!(swsm & IXGBE_SWSM_SMBI)) {
1411 			status = IXGBE_SUCCESS;
1412 			break;
1413 		}
1414 		usec_delay(50);
1415 	}
1416 
1417 	if (i == timeout) {
1418 		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
1419 			 "not granted.\n");
1420 		/*
1421 		 * this release is particularly important because our attempts
1422 		 * above to get the semaphore may have succeeded, and if there
1423 		 * was a timeout, we should unconditionally clear the semaphore
1424 		 * bits to free the driver to make progress
1425 		 */
1426 		ixgbe_release_eeprom_semaphore(hw);
1427 
1428 		usec_delay(50);
1429 		/*
1430 		 * one last try
1431 		 * If the SMBI bit is 0 when we read it, then the bit will be
1432 		 * set and we have the semaphore
1433 		 */
1434 		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1435 		if (!(swsm & IXGBE_SWSM_SMBI))
1436 			status = IXGBE_SUCCESS;
1437 	}
1438 
1439 	/* Now get the semaphore between SW/FW through the SWESMBI bit */
1440 	if (status == IXGBE_SUCCESS) {
1441 		for (i = 0; i < timeout; i++) {
1442 			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1443 
1444 			/* Set the SW EEPROM semaphore bit to request access */
1445 			swsm |= IXGBE_SWSM_SWESMBI;
1446 			IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);
1447 
1448 			/*
1449 			 * If we set the bit successfully then we got the
1450 			 * semaphore.
1451 			 */
1452 			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1453 			if (swsm & IXGBE_SWSM_SWESMBI)
1454 				break;
1455 
1456 			usec_delay(50);
1457 		}
1458 
1459 		/*
1460 		 * Release semaphores and return error if SW EEPROM semaphore
1461 		 * was not granted because we don't have access to the EEPROM
1462 		 */
1463 		if (i >= timeout) {
1464 			ERROR_REPORT1(IXGBE_ERROR_POLLING,
1465 			    "SWESMBI Software EEPROM semaphore not granted.\n");
1466 			ixgbe_release_eeprom_semaphore(hw);
1467 			status = IXGBE_ERR_EEPROM;
1468 		}
1469 	} else {
1470 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
1471 			     "Software semaphore SMBI between device drivers "
1472 			     "not granted.\n");
1473 	}
1474 
1475 	return status;
1476 }
1477 
1478 /**
1479  *  ixgbe_release_eeprom_semaphore - Release hardware semaphore
1480  *  @hw: pointer to hardware structure
1481  *
1482  *  This function clears hardware semaphore bits.
1483  **/
ixgbe_release_eeprom_semaphore(struct ixgbe_hw * hw)1484 void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1485 {
1486 	uint32_t swsm;
1487 
1488 	DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1489 
1490 	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1491 
1492 	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1493 	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1494 	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1495 	IXGBE_WRITE_FLUSH(hw);
1496 }
1497 
1498 /**
1499  *  ixgbe_ready_eeprom - Polls for EEPROM ready
1500  *  @hw: pointer to hardware structure
1501  **/
ixgbe_ready_eeprom(struct ixgbe_hw * hw)1502 int32_t ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1503 {
1504 	int32_t status = IXGBE_SUCCESS;
1505 	uint16_t i;
1506 	uint8_t spi_stat_reg;
1507 
1508 	DEBUGFUNC("ixgbe_ready_eeprom");
1509 
1510 	/*
1511 	 * Read "Status Register" repeatedly until the LSB is cleared.  The
1512 	 * EEPROM will signal that the command has been completed by clearing
1513 	 * bit 0 of the internal status register.  If it's not cleared within
1514 	 * 5 milliseconds, then error out.
1515 	 */
1516 	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1517 		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1518 					    IXGBE_EEPROM_OPCODE_BITS);
1519 		spi_stat_reg = (uint8_t)ixgbe_shift_in_eeprom_bits(hw, 8);
1520 		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1521 			break;
1522 
1523 		usec_delay(5);
1524 		ixgbe_standby_eeprom(hw);
1525 	}
1526 
1527 	/*
1528 	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1529 	 * devices (and only 0-5mSec on 5V devices)
1530 	 */
1531 	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1532 		DEBUGOUT("SPI EEPROM Status error\n");
1533 		status = IXGBE_ERR_EEPROM;
1534 	}
1535 
1536 	return status;
1537 }
1538 
1539 /**
1540  *  ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1541  *  @hw: pointer to hardware structure
1542  **/
ixgbe_standby_eeprom(struct ixgbe_hw * hw)1543 void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1544 {
1545 	uint32_t eec;
1546 
1547 	DEBUGFUNC("ixgbe_standby_eeprom");
1548 
1549 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1550 
1551 	/* Toggle CS to flush commands */
1552 	eec |= IXGBE_EEC_CS;
1553 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1554 	IXGBE_WRITE_FLUSH(hw);
1555 	usec_delay(1);
1556 	eec &= ~IXGBE_EEC_CS;
1557 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1558 	IXGBE_WRITE_FLUSH(hw);
1559 	usec_delay(1);
1560 }
1561 
1562 /**
1563  *  ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1564  *  @hw: pointer to hardware structure
1565  *  @data: data to send to the EEPROM
1566  *  @count: number of bits to shift out
1567  **/
ixgbe_shift_out_eeprom_bits(struct ixgbe_hw * hw,uint16_t data,uint16_t count)1568 void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, uint16_t data,
1569 				 uint16_t count)
1570 {
1571 	uint32_t eec;
1572 	uint32_t mask;
1573 	uint32_t i;
1574 
1575 	DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
1576 
1577 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1578 
1579 	/*
1580 	 * Mask is used to shift "count" bits of "data" out to the EEPROM
1581 	 * one bit at a time.  Determine the starting bit based on count
1582 	 */
1583 	mask = 0x01 << (count - 1);
1584 
1585 	for (i = 0; i < count; i++) {
1586 		/*
1587 		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1588 		 * "1", and then raising and then lowering the clock (the SK
1589 		 * bit controls the clock input to the EEPROM).  A "0" is
1590 		 * shifted out to the EEPROM by setting "DI" to "0" and then
1591 		 * raising and then lowering the clock.
1592 		 */
1593 		if (data & mask)
1594 			eec |= IXGBE_EEC_DI;
1595 		else
1596 			eec &= ~IXGBE_EEC_DI;
1597 
1598 		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1599 		IXGBE_WRITE_FLUSH(hw);
1600 
1601 		usec_delay(1);
1602 
1603 		ixgbe_raise_eeprom_clk(hw, &eec);
1604 		ixgbe_lower_eeprom_clk(hw, &eec);
1605 
1606 		/*
1607 		 * Shift mask to signify next bit of data to shift in to the
1608 		 * EEPROM
1609 		 */
1610 		mask = mask >> 1;
1611 	}
1612 
1613 	/* We leave the "DI" bit set to "0" when we leave this routine. */
1614 	eec &= ~IXGBE_EEC_DI;
1615 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1616 	IXGBE_WRITE_FLUSH(hw);
1617 }
1618 
1619 /**
1620  *  ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
1621  *  @hw: pointer to hardware structure
1622  *  @count: number of bits to shift
1623  **/
ixgbe_shift_in_eeprom_bits(struct ixgbe_hw * hw,uint16_t count)1624 uint16_t ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, uint16_t count)
1625 {
1626 	uint32_t eec;
1627 	uint32_t i;
1628 	uint16_t data = 0;
1629 
1630 	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
1631 
1632 	/*
1633 	 * In order to read a register from the EEPROM, we need to shift
1634 	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
1635 	 * the clock input to the EEPROM (setting the SK bit), and then reading
1636 	 * the value of the "DO" bit.  During this "shifting in" process the
1637 	 * "DI" bit should always be clear.
1638 	 */
1639 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1640 
1641 	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
1642 
1643 	for (i = 0; i < count; i++) {
1644 		data = data << 1;
1645 		ixgbe_raise_eeprom_clk(hw, &eec);
1646 
1647 		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1648 
1649 		eec &= ~(IXGBE_EEC_DI);
1650 		if (eec & IXGBE_EEC_DO)
1651 			data |= 1;
1652 
1653 		ixgbe_lower_eeprom_clk(hw, &eec);
1654 	}
1655 
1656 	return data;
1657 }
1658 
1659 /**
1660  *  ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
1661  *  @hw: pointer to hardware structure
1662  *  @eec: EEC register's current value
1663  **/
ixgbe_raise_eeprom_clk(struct ixgbe_hw * hw,uint32_t * eec)1664 void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec)
1665 {
1666 	DEBUGFUNC("ixgbe_raise_eeprom_clk");
1667 
1668 	/*
1669 	 * Raise the clock input to the EEPROM
1670 	 * (setting the SK bit), then delay
1671 	 */
1672 	*eec = *eec | IXGBE_EEC_SK;
1673 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
1674 	IXGBE_WRITE_FLUSH(hw);
1675 	usec_delay(1);
1676 }
1677 
1678 /**
1679  *  ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
1680  *  @hw: pointer to hardware structure
1681  *  @eec: EEC's current value
1682  **/
ixgbe_lower_eeprom_clk(struct ixgbe_hw * hw,uint32_t * eec)1683 void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec)
1684 {
1685 	DEBUGFUNC("ixgbe_lower_eeprom_clk");
1686 
1687 	/*
1688 	 * Lower the clock input to the EEPROM (clearing the SK bit), then
1689 	 * delay
1690 	 */
1691 	*eec = *eec & ~IXGBE_EEC_SK;
1692 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
1693 	IXGBE_WRITE_FLUSH(hw);
1694 	usec_delay(1);
1695 }
1696 
1697 /**
1698  *  ixgbe_release_eeprom - Release EEPROM, release semaphores
1699  *  @hw: pointer to hardware structure
1700  **/
ixgbe_release_eeprom(struct ixgbe_hw * hw)1701 void ixgbe_release_eeprom(struct ixgbe_hw *hw)
1702 {
1703 	uint32_t eec;
1704 
1705 	DEBUGFUNC("ixgbe_release_eeprom");
1706 
1707 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1708 
1709 	eec |= IXGBE_EEC_CS;  /* Pull CS high */
1710 	eec &= ~IXGBE_EEC_SK; /* Lower SCK */
1711 
1712 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1713 	IXGBE_WRITE_FLUSH(hw);
1714 
1715 	usec_delay(1);
1716 
1717 	/* Stop requesting EEPROM access */
1718 	eec &= ~IXGBE_EEC_REQ;
1719 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1720 
1721 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1722 
1723 	/* Delay before attempt to obtain semaphore again to allow FW access */
1724 	msec_delay(hw->eeprom.semaphore_delay);
1725 }
1726 
1727 /**
1728  *  ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
1729  *  @hw: pointer to hardware structure
1730  *
1731  *  Returns a negative error code on error, or the 16-bit checksum
1732  **/
ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw * hw)1733 int32_t ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
1734 {
1735 	uint16_t i;
1736 	uint16_t j;
1737 	uint16_t checksum = 0;
1738 	uint16_t length = 0;
1739 	uint16_t pointer = 0;
1740 	uint16_t word = 0;
1741 
1742 	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
1743 
1744 	/* Include 0x0-0x3F in the checksum */
1745 	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
1746 		if (hw->eeprom.ops.read(hw, i, &word)) {
1747 			DEBUGOUT("EEPROM read failed\n");
1748 			return IXGBE_ERR_EEPROM;
1749 		}
1750 		checksum += word;
1751 	}
1752 
1753 	/* Include all data from pointers except for the fw pointer */
1754 	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
1755 		if (hw->eeprom.ops.read(hw, i, &pointer)) {
1756 			DEBUGOUT("EEPROM read failed\n");
1757 			return IXGBE_ERR_EEPROM;
1758 		}
1759 
1760 		/* If the pointer seems invalid */
1761 		if (pointer == 0xFFFF || pointer == 0)
1762 			continue;
1763 
1764 		if (hw->eeprom.ops.read(hw, pointer, &length)) {
1765 			DEBUGOUT("EEPROM read failed\n");
1766 			return IXGBE_ERR_EEPROM;
1767 		}
1768 
1769 		if (length == 0xFFFF || length == 0)
1770 			continue;
1771 
1772 		for (j = pointer + 1; j <= pointer + length; j++) {
1773 			if (hw->eeprom.ops.read(hw, j, &word)) {
1774 				DEBUGOUT("EEPROM read failed\n");
1775 				return IXGBE_ERR_EEPROM;
1776 			}
1777 			checksum += word;
1778 		}
1779 	}
1780 
1781 	checksum = (uint16_t)IXGBE_EEPROM_SUM - checksum;
1782 
1783 	return (int32_t)checksum;
1784 }
1785 
1786 /**
1787  *  ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
1788  *  @hw: pointer to hardware structure
1789  *  @checksum_val: calculated checksum
1790  *
1791  *  Performs checksum calculation and validates the EEPROM checksum.  If the
1792  *  caller does not need checksum_val, the value can be NULL.
1793  **/
ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw * hw,uint16_t * checksum_val)1794 int32_t ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
1795 					       uint16_t *checksum_val)
1796 {
1797 	int32_t status;
1798 	uint16_t checksum;
1799 	uint16_t read_checksum = 0;
1800 
1801 	DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
1802 
1803 	/* Read the first word from the EEPROM. If this times out or fails, do
1804 	 * not continue or we could be in for a very long wait while every
1805 	 * EEPROM read fails
1806 	 */
1807 	status = hw->eeprom.ops.read(hw, 0, &checksum);
1808 	if (status) {
1809 		DEBUGOUT("EEPROM read failed\n");
1810 		return status;
1811 	}
1812 
1813 	status = hw->eeprom.ops.calc_checksum(hw);
1814 	if (status < 0)
1815 		return status;
1816 
1817 	checksum = (uint16_t)(status & 0xffff);
1818 
1819 	status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
1820 	if (status) {
1821 		DEBUGOUT("EEPROM read failed\n");
1822 		return status;
1823 	}
1824 
1825 	/* Verify read checksum from EEPROM is the same as
1826 	 * calculated checksum
1827 	 */
1828 	if (read_checksum != checksum)
1829 		status = IXGBE_ERR_EEPROM_CHECKSUM;
1830 
1831 	/* If the user cares, return the calculated checksum */
1832 	if (checksum_val)
1833 		*checksum_val = checksum;
1834 
1835 	return status;
1836 }
1837 
1838 /**
1839  *  ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
1840  *  @hw: pointer to hardware structure
1841  **/
ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw * hw)1842 int32_t ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1843 {
1844 	int32_t status;
1845 	uint16_t checksum;
1846 
1847 	DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
1848 
1849 	/* Read the first word from the EEPROM. If this times out or fails, do
1850 	 * not continue or we could be in for a very long wait while every
1851 	 * EEPROM read fails
1852 	 */
1853 	status = hw->eeprom.ops.read(hw, 0, &checksum);
1854 	if (status) {
1855 		DEBUGOUT("EEPROM read failed\n");
1856 		return status;
1857 	}
1858 
1859 	status = hw->eeprom.ops.calc_checksum(hw);
1860 	if (status < 0)
1861 		return status;
1862 
1863 	checksum = (uint16_t)(status & 0xffff);
1864 
1865 	status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
1866 
1867 	return status;
1868 }
1869 
1870 /**
1871  *  ixgbe_validate_mac_addr - Validate MAC address
1872  *  @mac_addr: pointer to MAC address.
1873  *
1874  *  Tests a MAC address to ensure it is a valid Individual Address
1875  **/
ixgbe_validate_mac_addr(uint8_t * mac_addr)1876 int32_t ixgbe_validate_mac_addr(uint8_t *mac_addr)
1877 {
1878 	int32_t status = IXGBE_SUCCESS;
1879 
1880 	DEBUGFUNC("ixgbe_validate_mac_addr");
1881 
1882 	/* Make sure it is not a multicast address */
1883 	if (IXGBE_IS_MULTICAST(mac_addr)) {
1884 		DEBUGOUT("MAC address is multicast\n");
1885 		status = IXGBE_ERR_INVALID_MAC_ADDR;
1886 	/* Not a broadcast address */
1887 	} else if (IXGBE_IS_BROADCAST(mac_addr)) {
1888 		DEBUGOUT("MAC address is broadcast\n");
1889 		status = IXGBE_ERR_INVALID_MAC_ADDR;
1890 	/* Reject the zero address */
1891 	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
1892 		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
1893 		DEBUGOUT("MAC address is all zeros\n");
1894 		status = IXGBE_ERR_INVALID_MAC_ADDR;
1895 	}
1896 	return status;
1897 }
1898 
/**
 *  ixgbe_set_rar_generic - Set Rx address register
 *  @hw: pointer to hardware structure
 *  @index: Receive address register to write
 *  @addr: Address to put into receive address register
 *  @vmdq: VMDq "set" or "pool" index
 *  @enable_addr: set flag that address is active
 *
 *  Puts an ethernet address into a receive address register.
 *
 *  Returns IXGBE_SUCCESS, or IXGBE_ERR_INVALID_ARGUMENT when @index is
 *  beyond the number of RAR entries this MAC supports.
 **/
int32_t ixgbe_set_rar_generic(struct ixgbe_hw *hw, uint32_t index, uint8_t *addr,
			      uint32_t vmdq, uint32_t enable_addr)
{
	uint32_t rar_low, rar_high;
	uint32_t rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_set_rar_generic");

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
			     "RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* setup VMDq pool selection before this RAR gets enabled */
	hw->mac.ops.set_vmdq(hw, index, vmdq);

	/*
	 * HW expects these in little endian so we reverse the byte
	 * order from network order (big endian) to little endian
	 */
	rar_low = ((uint32_t)addr[0] |
		   ((uint32_t)addr[1] << 8) |
		   ((uint32_t)addr[2] << 16) |
		   ((uint32_t)addr[3] << 24));
	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
	/* The two high address bytes occupy RAH's low 16 bits */
	rar_high |= ((uint32_t)addr[4] | ((uint32_t)addr[5] << 8));

	if (enable_addr != 0)
		rar_high |= IXGBE_RAH_AV;

	/* Write RAL first; the AV bit in RAH activates the entry */
	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	return IXGBE_SUCCESS;
}
1952 
/**
 *  ixgbe_clear_rar_generic - Remove Rx address register
 *  @hw: pointer to hardware structure
 *  @index: Receive address register to write
 *
 *  Clears an ethernet address from a receive address register.
 *
 *  Returns IXGBE_SUCCESS, or IXGBE_ERR_INVALID_ARGUMENT when @index is
 *  beyond the number of RAR entries this MAC supports.
 **/
int32_t ixgbe_clear_rar_generic(struct ixgbe_hw *hw, uint32_t index)
{
	uint32_t rar_high;
	uint32_t rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_clear_rar_generic");

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
			     "RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);

	/* Zero RAL and write back RAH with AV cleared to deactivate it */
	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	/* clear VMDq pool/queue selection for this RAR */
	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);

	return IXGBE_SUCCESS;
}
1990 
/**
 *  ixgbe_init_rx_addrs_generic - Initializes receive address filters.
 *  @hw: pointer to hardware structure
 *
 *  Places the MAC address in receive address register 0 and clears the rest
 *  of the receive address registers. Clears the multicast table. Assumes
 *  the receiver is in reset when the routine is called.
 *
 *  Always returns IXGBE_SUCCESS.
 **/
int32_t ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	uint32_t i;
	uint32_t rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_init_rx_addrs_generic");

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
	    IXGBE_ERR_INVALID_MAC_ADDR) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);
	} else {
		/* Setup the receive address. */
		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
	}

	/* clear VMDq pool/queue selection for RAR 0 */
	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);

	hw->addr_ctrl.overflow_promisc = 0;

	/* RAR 0 now holds our own station address */
	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA */
	hw->addr_ctrl.mta_in_use = 0;
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	DEBUGOUT(" Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	ixgbe_init_uta_tables(hw);

	return IXGBE_SUCCESS;
}
2059 
2060 /**
2061  *  ixgbe_add_uc_addr - Adds a secondary unicast address.
2062  *  @hw: pointer to hardware structure
2063  *  @addr: new address
2064  *  @vmdq: VMDq "set" or "pool" index
2065  *
2066  *  Adds it to unused receive address register or goes into promiscuous mode.
2067  **/
ixgbe_add_uc_addr(struct ixgbe_hw * hw,uint8_t * addr,uint32_t vmdq)2068 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, uint8_t *addr, uint32_t vmdq)
2069 {
2070 	uint32_t rar_entries = hw->mac.num_rar_entries;
2071 	uint32_t rar;
2072 
2073 	DEBUGFUNC("ixgbe_add_uc_addr");
2074 
2075 	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2076 		  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2077 
2078 	/*
2079 	 * Place this address in the RAR if there is room,
2080 	 * else put the controller into promiscuous mode
2081 	 */
2082 	if (hw->addr_ctrl.rar_used_count < rar_entries) {
2083 		rar = hw->addr_ctrl.rar_used_count;
2084 		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2085 		DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2086 		hw->addr_ctrl.rar_used_count++;
2087 	} else {
2088 		hw->addr_ctrl.overflow_promisc++;
2089 	}
2090 
2091 	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2092 }
2093 
2094 /**
2095  *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
2096  *  @hw: pointer to hardware structure
2097  *  @mc_addr: the multicast address
2098  *
2099  *  Extracts the 12 bits, from a multicast address, to determine which
2100  *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
2101  *  incoming rx multicast addresses, to determine the bit-vector to check in
2102  *  the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
2103  *  by the MO field of the MCSTCTRL. The MO field is set during initialization
2104  *  to mc_filter_type.
2105  **/
ixgbe_mta_vector(struct ixgbe_hw * hw,uint8_t * mc_addr)2106 int32_t ixgbe_mta_vector(struct ixgbe_hw *hw, uint8_t *mc_addr)
2107 {
2108 	uint32_t vector = 0;
2109 
2110 	DEBUGFUNC("ixgbe_mta_vector");
2111 
2112 	switch (hw->mac.mc_filter_type) {
2113 	case 0:   /* use bits [47:36] of the address */
2114 		vector = ((mc_addr[4] >> 4) | (((uint16_t)mc_addr[5]) << 4));
2115 		break;
2116 	case 1:   /* use bits [46:35] of the address */
2117 		vector = ((mc_addr[4] >> 3) | (((uint16_t)mc_addr[5]) << 5));
2118 		break;
2119 	case 2:   /* use bits [45:34] of the address */
2120 		vector = ((mc_addr[4] >> 2) | (((uint16_t)mc_addr[5]) << 6));
2121 		break;
2122 	case 3:   /* use bits [43:32] of the address */
2123 		vector = ((mc_addr[4]) | (((uint16_t)mc_addr[5]) << 8));
2124 		break;
2125 	default:  /* Invalid mc_filter_type */
2126 		DEBUGOUT("MC filter type param set incorrectly\n");
2127 		panic("incorrect multicast filter type");
2128 		break;
2129 	}
2130 
2131 	/* vector can only be 12-bits or boundary will be exceeded */
2132 	vector &= 0xFFF;
2133 	return vector;
2134 }
2135 
/**
 *  ixgbe_set_mta - Set bit-vector in multicast table
 *  @hw: pointer to hardware structure
 *  @mc_addr: Multicast address
 *
 *  Sets the bit-vector in the multicast table.  Only the in-memory
 *  mta_shadow copy is updated here; the caller is responsible for
 *  flushing the shadow to the hardware MTA registers.
 **/
void ixgbe_set_mta(struct ixgbe_hw *hw, uint8_t *mc_addr)
{
	uint32_t vector;
	uint32_t vector_bit;
	uint32_t vector_reg;

	DEBUGFUNC("ixgbe_set_mta");

	/* Track how many entries are live so MCSTCTRL.MFE can be managed */
	hw->addr_ctrl.mta_in_use++;

	vector = ixgbe_mta_vector(hw, mc_addr);
	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);

	/*
	 * The MTA is a register array of 128 32-bit registers. It is treated
	 * like an array of 4096 bits.  We want to set bit
	 * BitArray[vector_value]. So we figure out what register the bit is
	 * in, read it, OR in the new bit, then write back the new value.  The
	 * register is determined by the upper 7 bits of the vector value and
	 * the bit within that register are determined by the lower 5 bits of
	 * the value.
	 */
	vector_reg = (vector >> 5) & 0x7F;
	vector_bit = vector & 0x1F;
	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
}
2169 
/**
 *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
 *  @hw: pointer to hardware structure
 *  @mc_addr_list: the list of new multicast addresses
 *  @mc_addr_count: number of addresses
 *  @next: iterator function to walk the multicast address list
 *  @clear: flag, when set clears the table beforehand
 *
 *  When the clear flag is set, the given list replaces any existing list.
 *  Hashes the given addresses into the multicast table.
 *
 *  Always returns IXGBE_SUCCESS.
 **/
int32_t ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, uint8_t *mc_addr_list,
					  uint32_t mc_addr_count, ixgbe_mc_addr_itr next,
					  bool clear)
{
	uint32_t i;
	uint32_t vmdq;

	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");

	/*
	 * Set the new number of MC addresses that we are being requested to
	 * use.
	 */
	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
	hw->addr_ctrl.mta_in_use = 0;

	/* Clear mta_shadow */
	if (clear) {
		DEBUGOUT(" Clearing MTA\n");
		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
	}

	/* Update mta_shadow; @next yields each address and its vmdq pool */
	for (i = 0; i < mc_addr_count; i++) {
		DEBUGOUT(" Adding the multicast addresses:\n");
		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
	}

	/* Enable mta: flush the whole shadow into the hardware MTA array */
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
				      hw->mac.mta_shadow[i]);

	/* Turn on hash filtering only if at least one entry is in use */
	if (hw->addr_ctrl.mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);

	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
	return IXGBE_SUCCESS;
}
2221 
2222 /**
2223  *  ixgbe_enable_mc_generic - Enable multicast address in RAR
2224  *  @hw: pointer to hardware structure
2225  *
2226  *  Enables multicast address in RAR and the use of the multicast hash table.
2227  **/
ixgbe_enable_mc_generic(struct ixgbe_hw * hw)2228 int32_t ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2229 {
2230 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2231 
2232 	DEBUGFUNC("ixgbe_enable_mc_generic");
2233 
2234 	if (a->mta_in_use > 0)
2235 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2236 				hw->mac.mc_filter_type);
2237 
2238 	return IXGBE_SUCCESS;
2239 }
2240 
2241 /**
2242  *  ixgbe_disable_mc_generic - Disable multicast address in RAR
2243  *  @hw: pointer to hardware structure
2244  *
2245  *  Disables multicast address in RAR and the use of the multicast hash table.
2246  **/
ixgbe_disable_mc_generic(struct ixgbe_hw * hw)2247 int32_t ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2248 {
2249 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2250 
2251 	DEBUGFUNC("ixgbe_disable_mc_generic");
2252 
2253 	if (a->mta_in_use > 0)
2254 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2255 
2256 	return IXGBE_SUCCESS;
2257 }
2258 
/**
 *  ixgbe_fc_enable_generic - Enable flow control
 *  @hw: pointer to hardware structure
 *
 *  Enable flow control according to the current settings.
 *
 *  Returns IXGBE_SUCCESS, IXGBE_ERR_INVALID_LINK_SETTINGS for a zero
 *  pause time or a bad water mark configuration, or IXGBE_ERR_CONFIG
 *  when fc.current_mode is not a recognized mode.
 **/
int32_t ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
	int32_t ret_val = IXGBE_SUCCESS;
	uint32_t mflcn_reg, fccfg_reg;
	uint32_t reg;
	uint32_t fcrtl, fcrth;
	int i;

	DEBUGFUNC("ixgbe_fc_enable_generic");

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				DEBUGOUT("Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}

	/* Negotiate the fc mode to use */
	hw->mac.ops.fc_autoneg(hw);

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	default:
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
			     "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	/* Set 802.3x based flow control settings. */
	mflcn_reg |= IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);


	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			/* << 10 scales the configured water marks
			 * (presumably KB units) into the registers'
			 * byte-count fields -- TODO confirm units */
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the Rx packet buffer size - 24KB.  This allows
			 * the Tx switch to function even under heavy Rx
			 * workloads.
			 */
			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 0x6000;
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	/* Configure pause time (2 TCs per register) */
	reg = (uint32_t)hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}
2391 
/**
 *  ixgbe_negotiate_fc - Negotiate flow control
 *  @hw: pointer to hardware structure
 *  @adv_reg: flow control advertised settings
 *  @lp_reg: link partner's flow control settings
 *  @adv_sym: symmetric pause bit in advertisement
 *  @adv_asm: asymmetric pause bit in advertisement
 *  @lp_sym: symmetric pause bit in link partner advertisement
 *  @lp_asm: asymmetric pause bit in link partner advertisement
 *
 *  Find the intersection between advertised settings and link partner's
 *  advertised settings.  Sets hw->fc.current_mode to the resolved mode.
 *
 *  Returns IXGBE_SUCCESS, or IXGBE_ERR_FC_NOT_NEGOTIATED when either
 *  advertisement register is zero.
 **/
int32_t ixgbe_negotiate_fc(struct ixgbe_hw *hw, uint32_t adv_reg,
			   uint32_t lp_reg, uint32_t adv_sym,
			   uint32_t adv_asm, uint32_t lp_sym,
			   uint32_t lp_asm)
{
	if ((!(adv_reg)) ||  (!(lp_reg))) {
		ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
			     "Local or link partner's advertised flow control "
			     "settings are NULL. Local: %x, link partner: %x\n",
			     adv_reg, lp_reg);
		return IXGBE_ERR_FC_NOT_NEGOTIATED;
	}

	/* Both sides advertise symmetric pause */
	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
		/*
		 * Now we need to check if the user selected Rx ONLY
		 * of pause frames.  In this case, we had to advertise
		 * FULL flow control because we could not advertise RX
		 * ONLY. Hence, we must now check to see if we need to
		 * turn OFF the TRANSMISSION of PAUSE frames.
		 */
		if (hw->fc.requested_mode == ixgbe_fc_full) {
			hw->fc.current_mode = ixgbe_fc_full;
			DEBUGOUT("Flow Control = FULL.\n");
		} else {
			hw->fc.current_mode = ixgbe_fc_rx_pause;
			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
		}
	/* We: asym only; partner: sym + asym -> we may send pause */
	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		hw->fc.current_mode = ixgbe_fc_tx_pause;
		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
	/* We: sym + asym; partner: asym only -> we may receive pause */
	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		hw->fc.current_mode = ixgbe_fc_rx_pause;
		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
	} else {
		hw->fc.current_mode = ixgbe_fc_none;
		DEBUGOUT("Flow Control = NONE.\n");
	}
	return IXGBE_SUCCESS;
}
2447 
2448 /**
2449  *  ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2450  *  @hw: pointer to hardware structure
2451  *
2452  *  Enable flow control according on 1 gig fiber.
2453  **/
ixgbe_fc_autoneg_fiber(struct ixgbe_hw * hw)2454 int32_t ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2455 {
2456 	uint32_t pcs_anadv_reg, pcs_lpab_reg, linkstat;
2457 	int32_t ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2458 
2459 	/*
2460 	 * On multispeed fiber at 1g, bail out if
2461 	 * - link is up but AN did not complete, or if
2462 	 * - link is up and AN completed but timed out
2463 	 */
2464 
2465 	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2466 	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2467 	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
2468 		DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
2469 		goto out;
2470 	}
2471 
2472 	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2473 	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2474 
2475 	ret_val =  ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2476 				      pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2477 				      IXGBE_PCS1GANA_ASM_PAUSE,
2478 				      IXGBE_PCS1GANA_SYM_PAUSE,
2479 				      IXGBE_PCS1GANA_ASM_PAUSE);
2480 
2481 out:
2482 	return ret_val;
2483 }
2484 
/**
 *  ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
 *  @hw: pointer to hardware structure
 *
 *  Enable flow control according to IEEE clause 37.
 *
 *  Returns the result of ixgbe_negotiate_fc(), or
 *  IXGBE_ERR_FC_NOT_NEGOTIATED when backplane autoneg did not complete
 *  or (on 82599) the link partner does not support autoneg.
 **/
int32_t ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
{
	uint32_t links2, anlp1_reg, autoc_reg, links;
	int32_t ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;

	/*
	 * On backplane, bail out if
	 * - backplane autoneg was not completed, or if
	 * - we are 82599 and link partner is not AN enabled
	 */
	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
		DEBUGOUT("Auto-Negotiation did not complete\n");
		goto out;
	}

	if (hw->mac.type == ixgbe_mac_82599EB) {
		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
			DEBUGOUT("Link partner is not AN enabled\n");
			goto out;
		}
	}
	/*
	 * Read the 10g AN autoc and LP ability registers and resolve
	 * local flow control settings accordingly
	 */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);

	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);

out:
	return ret_val;
}
2528 
/**
 *  ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
 *  @hw: pointer to hardware structure
 *
 *  Enable flow control according to IEEE clause 37.
 *
 *  Returns the result of ixgbe_negotiate_fc() on the PHY's autoneg
 *  advertisement and link partner ability registers.
 **/
int32_t ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
{
	uint16_t technology_ability_reg = 0;
	uint16_t lp_technology_ability_reg = 0;

	/* Read both advertisement registers over MDIO.  Return values are
	 * not checked; if a read fails and leaves the zero-initialized
	 * value intact, ixgbe_negotiate_fc() rejects the zero register. */
	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
			     &technology_ability_reg);
	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
			     &lp_technology_ability_reg);

	return ixgbe_negotiate_fc(hw, (uint32_t)technology_ability_reg,
				  (uint32_t)lp_technology_ability_reg,
				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
}
2552 
/**
 *  ixgbe_fc_autoneg - Configure flow control
 *  @hw: pointer to hardware structure
 *
 *  Compares our advertised flow control capabilities to those advertised by
 *  our link partner, and determines the proper flow control mode to use.
 *  On any failure (autoneg disabled, link down, media-specific negotiation
 *  error) falls back to the user-requested mode.
 **/
void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
	int32_t ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
	ixgbe_link_speed speed;
	bool link_up;

	DEBUGFUNC("ixgbe_fc_autoneg");

	/*
	 * AN should have completed when the cable was plugged in.
	 * Look for reasons to bail out.  Bail out if:
	 * - FC autoneg is disabled, or if
	 * - link is not up.
	 */
	if (hw->fc.disable_fc_autoneg) {
		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
			     "Flow control autoneg is disabled");
		goto out;
	}

	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
	if (!link_up) {
		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
		goto out;
	}

	/* Dispatch on media type; unlisted media keep the NOT_NEGOTIATED
	 * default and fall back below. */
	switch (hw->phy.media_type) {
	/* Autoneg flow control on fiber adapters */
	case ixgbe_media_type_fiber_fixed:
	case ixgbe_media_type_fiber_qsfp:
	case ixgbe_media_type_fiber:
		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
			ret_val = ixgbe_fc_autoneg_fiber(hw);
		break;

	/* Autoneg flow control on backplane adapters */
	case ixgbe_media_type_backplane:
		ret_val = ixgbe_fc_autoneg_backplane(hw);
		break;

	/* Autoneg flow control on copper adapters */
	case ixgbe_media_type_copper:
		if (ixgbe_device_supports_autoneg_fc(hw))
			ret_val = ixgbe_fc_autoneg_copper(hw);
		break;

	default:
		break;
	}

out:
	if (ret_val == IXGBE_SUCCESS) {
		hw->fc.fc_was_autonegged = TRUE;
	} else {
		/* Negotiation failed: honor the user-requested mode */
		hw->fc.fc_was_autonegged = FALSE;
		hw->fc.current_mode = hw->fc.requested_mode;
	}
}
2618 
2619 /*
2620  * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
2621  * @hw: pointer to hardware structure
2622  *
2623  * System-wide timeout range is encoded in PCIe Device Control2 register.
2624  *
2625  * Add 10% to specified maximum and return the number of times to poll for
2626  * completion timeout, in units of 100 microsec.  Never return less than
2627  * 800 = 80 millisec.
2628  */
ixgbe_pcie_timeout_poll(struct ixgbe_hw * hw)2629 static uint32_t ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
2630 {
2631 	int16_t devctl2;
2632 	uint32_t pollcnt;
2633 
2634 	devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
2635 	devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
2636 
2637 	switch (devctl2) {
2638 	case IXGBE_PCIDEVCTRL2_65_130ms:
2639 		pollcnt = 1300;		/* 130 millisec */
2640 		break;
2641 	case IXGBE_PCIDEVCTRL2_260_520ms:
2642 		pollcnt = 5200;		/* 520 millisec */
2643 		break;
2644 	case IXGBE_PCIDEVCTRL2_1_2s:
2645 		pollcnt = 20000;	/* 2 sec */
2646 		break;
2647 	case IXGBE_PCIDEVCTRL2_4_8s:
2648 		pollcnt = 80000;	/* 8 sec */
2649 		break;
2650 	case IXGBE_PCIDEVCTRL2_17_34s:
2651 		pollcnt = 34000;	/* 34 sec */
2652 		break;
2653 	case IXGBE_PCIDEVCTRL2_50_100us:	/* 100 microsecs */
2654 	case IXGBE_PCIDEVCTRL2_1_2ms:		/* 2 millisecs */
2655 	case IXGBE_PCIDEVCTRL2_16_32ms:		/* 32 millisec */
2656 	case IXGBE_PCIDEVCTRL2_16_32ms_def:	/* 32 millisec default */
2657 	default:
2658 		pollcnt = 800;		/* 80 millisec minimum */
2659 		break;
2660 	}
2661 
2662 	/* add 10% to spec maximum */
2663 	return (pollcnt * 11) / 10;
2664 }
2665 
/**
 *  ixgbe_disable_pcie_master - Disable PCI-express master access
 *  @hw: pointer to hardware structure
 *
 *  Disables PCI-Express master access and verifies there are no pending
 *  requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
 *  bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
 *  is returned signifying master requests disabled.
 **/
int32_t ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
	int32_t status = IXGBE_SUCCESS;
	uint32_t i, poll;
	uint16_t value;

	DEBUGFUNC("ixgbe_disable_pcie_master");

	/* Always set this bit to ensure any future transactions are blocked */
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);

	/* Exit if master requests are blocked */
	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
	    IXGBE_REMOVED(hw->hw_addr))
		goto out;

	/* Poll for master request bit to clear, 100us per iteration */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		usec_delay(100);
		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
			goto out;
	}

	/*
	 * Two consecutive resets are required via CTRL.RST per datasheet
	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
	 * of this need.  The first reset prevents new master requests from
	 * being issued by our device.  We then must wait 1usec or more for any
	 * remaining completions from the PCIe bus to trickle in, and then reset
	 * again to clear out any effects they may have had on our device.
	 */
	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

	/* X550 and newer only need the double-reset flag set above */
	if (hw->mac.type >= ixgbe_mac_X550)
		goto out;

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usec_delay(100);
		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
		/* Bail if the device dropped off the bus mid-poll */
		if (IXGBE_REMOVED(hw->hw_addr))
			goto out;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

	ERROR_REPORT1(IXGBE_ERROR_POLLING,
		     "PCIe transaction pending bit also did not clear.\n");
	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;

out:
	return status;
}
2733 
/**
 *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
 *  @hw: pointer to hardware structure
 *  @mask: Mask to specify which semaphore to acquire
 *
 *  Acquires the SWFW semaphore through the GSSR register for the specified
 *  function (CSR, PHY0, PHY1, EEPROM, Flash)
 *
 *  Returns IXGBE_SUCCESS on acquisition, IXGBE_ERR_SWFW_SYNC when the
 *  resource stays held by FW/SW past the ~1 second retry window or the
 *  EEPROM semaphore cannot be taken.
 **/
int32_t ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, uint32_t mask)
{
	uint32_t gssr = 0;
	uint32_t swmask = mask;
	/* FW ownership bits sit 5 positions above the SW bits in GSSR */
	uint32_t fwmask = mask << 5;
	uint32_t timeout = 200;
	uint32_t i;

	DEBUGFUNC("ixgbe_acquire_swfw_sync");

	for (i = 0; i < timeout; i++) {
		/*
		 * SW NVM semaphore bit is used for access to all
		 * SW_FW_SYNC bits (not just NVM)
		 */
		if (ixgbe_get_eeprom_semaphore(hw))
			return IXGBE_ERR_SWFW_SYNC;

		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
		if (!(gssr & (fwmask | swmask))) {
			/* Free: claim the SW bit and drop the semaphore */
			gssr |= swmask;
			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
			ixgbe_release_eeprom_semaphore(hw);
			return IXGBE_SUCCESS;
		} else {
			/* Resource is currently in use by FW or SW */
			ixgbe_release_eeprom_semaphore(hw);
			msec_delay(5);
		}
	}

	/* If time expired clear the bits holding the lock and retry */
	if (gssr & (fwmask | swmask))
		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));

	msec_delay(5);
	return IXGBE_ERR_SWFW_SYNC;
}
2780 
2781 /**
2782  *  ixgbe_release_swfw_sync - Release SWFW semaphore
2783  *  @hw: pointer to hardware structure
2784  *  @mask: Mask to specify which semaphore to release
2785  *
2786  *  Releases the SWFW semaphore through the GSSR register for the specified
2787  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
2788  **/
ixgbe_release_swfw_sync(struct ixgbe_hw * hw,uint32_t mask)2789 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, uint32_t mask)
2790 {
2791 	uint32_t gssr;
2792 	uint32_t swmask = mask;
2793 
2794 	DEBUGFUNC("ixgbe_release_swfw_sync");
2795 
2796 	ixgbe_get_eeprom_semaphore(hw);
2797 
2798 	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2799 	gssr &= ~swmask;
2800 	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2801 
2802 	ixgbe_release_eeprom_semaphore(hw);
2803 }
2804 
2805 /**
2806  *  ixgbe_disable_sec_rx_path_generic - Stops the receive data path
2807  *  @hw: pointer to hardware structure
2808  *
2809  *  Stops the receive data path and waits for the HW to internally empty
2810  *  the Rx security block
2811  **/
ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw * hw)2812 int32_t ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
2813 {
2814 #define IXGBE_MAX_SECRX_POLL 40
2815 
2816 	int i;
2817 	int secrxreg;
2818 
2819 	DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
2820 
2821 
2822 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2823 	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
2824 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2825 	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
2826 		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
2827 		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
2828 			break;
2829 		else
2830 			/* Use interrupt-safe sleep just in case */
2831 			usec_delay(1000);
2832 	}
2833 
2834 	/* For informational purposes only */
2835 	if (i >= IXGBE_MAX_SECRX_POLL)
2836 		DEBUGOUT("Rx unit being enabled before security "
2837 			 "path fully disabled.  Continuing with init.\n");
2838 
2839 	return IXGBE_SUCCESS;
2840 }
2841 
2842 /**
2843  *  prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
2844  *  @hw: pointer to hardware structure
2845  *  @locked: bool to indicate whether the SW/FW lock was taken
2846  *  @reg_val: Value we read from AUTOC
2847  *
2848  *  The default case requires no protection so just to the register read.
2849  */
prot_autoc_read_generic(struct ixgbe_hw * hw,bool * locked,uint32_t * reg_val)2850 int32_t prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked,
2851 				uint32_t *reg_val)
2852 {
2853 	*locked = FALSE;
2854 	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2855 	return IXGBE_SUCCESS;
2856 }
2857 
2858 /**
2859  * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
2860  * @hw: pointer to hardware structure
2861  * @reg_val: value to write to AUTOC
2862  * @locked: bool to indicate whether the SW/FW lock was already taken by
2863  *           previous read.
2864  *
2865  * The default case requires no protection so just to the register write.
2866  */
prot_autoc_write_generic(struct ixgbe_hw * hw,uint32_t reg_val,bool locked)2867 int32_t prot_autoc_write_generic(struct ixgbe_hw *hw, uint32_t reg_val,
2868 				 bool locked)
2869 {
2870 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
2871 	return IXGBE_SUCCESS;
2872 }
2873 
2874 /**
2875  *  ixgbe_enable_sec_rx_path_generic - Enables the receive data path
2876  *  @hw: pointer to hardware structure
2877  *
2878  *  Enables the receive data path.
2879  **/
ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw * hw)2880 int32_t ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
2881 {
2882 	uint32_t secrxreg;
2883 
2884 	DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
2885 
2886 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2887 	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
2888 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2889 	IXGBE_WRITE_FLUSH(hw);
2890 
2891 	return IXGBE_SUCCESS;
2892 }
2893 
2894 /**
2895  *  ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
2896  *  @hw: pointer to hardware structure
2897  *  @regval: register value to write to RXCTRL
2898  *
2899  *  Enables the Rx DMA unit
2900  **/
ixgbe_enable_rx_dma_generic(struct ixgbe_hw * hw,uint32_t regval)2901 int32_t ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, uint32_t regval)
2902 {
2903 	DEBUGFUNC("ixgbe_enable_rx_dma_generic");
2904 
2905 	if (regval & IXGBE_RXCTRL_RXEN)
2906 		ixgbe_enable_rx(hw);
2907 	else
2908 		ixgbe_disable_rx(hw);
2909 
2910 	return IXGBE_SUCCESS;
2911 }
2912 
/**
 *  ixgbe_blink_led_start_generic - Blink LED based on index.
 *  @hw: pointer to hardware structure
 *  @index: led number to blink (0-3)
 *
 *  Returns IXGBE_ERR_PARAM for index > 3, otherwise the status of the
 *  protected AUTOC accesses (IXGBE_SUCCESS on the happy path).
 **/
int32_t ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, uint32_t index)
{
	ixgbe_link_speed speed = 0;
	bool link_up = 0;
	uint32_t autoc_reg = 0;
	uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	int32_t ret_val = IXGBE_SUCCESS;
	bool locked = FALSE;

	DEBUGFUNC("ixgbe_blink_led_start_generic");

	/* LEDCTL only has fields for LEDs 0-3 */
	if (index > 3)
		return IXGBE_ERR_PARAM;

	/*
	 * Link must be up to auto-blink the LEDs;
	 * Force it if link is down.
	 */
	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);

	if (!link_up) {
		/* AUTOC may need the SW/FW lock on some MACs, so always go
		 * through the prot_autoc_* ops instead of raw accesses. */
		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
		if (ret_val != IXGBE_SUCCESS)
			goto out;

		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
		autoc_reg |= IXGBE_AUTOC_FLU;

		ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
		if (ret_val != IXGBE_SUCCESS)
			goto out;

		/* give the forced link a moment to come up */
		IXGBE_WRITE_FLUSH(hw);
		msec_delay(10);
	}

	/* Switch the selected LED's mode field to blink */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_BLINK(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

out:
	return ret_val;
}
2962 
/**
 *  ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
 *  @hw: pointer to hardware structure
 *  @index: led number to stop blinking (0-3)
 *
 *  Undoes ixgbe_blink_led_start_generic(): clears the forced-link bit in
 *  AUTOC and puts the LED back into link/activity mode.  Returns
 *  IXGBE_ERR_PARAM for index > 3, otherwise the status of the protected
 *  AUTOC accesses.
 **/
int32_t ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, uint32_t index)
{
	uint32_t autoc_reg = 0;
	uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	int32_t ret_val = IXGBE_SUCCESS;
	bool locked = FALSE;

	DEBUGFUNC("ixgbe_blink_led_stop_generic");

	/* LEDCTL only has fields for LEDs 0-3 */
	if (index > 3)
		return IXGBE_ERR_PARAM;

	/* AUTOC may need the SW/FW lock; use the protected accessors */
	ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
	if (ret_val != IXGBE_SUCCESS)
		goto out;

	autoc_reg &= ~IXGBE_AUTOC_FLU;
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;

	ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
	if (ret_val != IXGBE_SUCCESS)
		goto out;

	/* Clear blink mode and restore link/activity indication */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg &= ~IXGBE_LED_BLINK(index);
	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

out:
	return ret_val;
}
3000 
3001 /**
3002  *  ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3003  *  @hw: pointer to hardware structure
3004  *
3005  *  Read PCIe configuration space, and get the MSI-X vector count from
3006  *  the capabilities table.
3007  **/
ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw * hw)3008 uint16_t ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3009 {
3010 	uint16_t msix_count = 1;
3011 	uint16_t max_msix_count;
3012 	uint16_t pcie_offset;
3013 
3014 	switch (hw->mac.type) {
3015 	case ixgbe_mac_82598EB:
3016 		pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3017 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3018 		break;
3019 	case ixgbe_mac_82599EB:
3020 	case ixgbe_mac_X540:
3021 	case ixgbe_mac_X550:
3022 	case ixgbe_mac_X550EM_x:
3023 	case ixgbe_mac_X550EM_a:
3024 		pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3025 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3026 		break;
3027 	default:
3028 		return msix_count;
3029 	}
3030 
3031 	DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3032 	msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3033 	if (IXGBE_REMOVED(hw->hw_addr))
3034 		msix_count = 0;
3035 	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3036 
3037 	/* MSI-X count is zero-based in HW */
3038 	msix_count++;
3039 
3040 	if (msix_count > max_msix_count)
3041 		msix_count = max_msix_count;
3042 
3043 	return msix_count;
3044 }
3045 
/**
 *  ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
 *  @hw: pointer to hardware structure
 *  @addr: Address to put into receive address register
 *  @vmdq: VMDq pool to assign
 *
 *  Puts an ethernet address into a receive address register, or
 *  finds the rar that it is already in; adds to the pool list.
 *
 *  Returns the RAR index used (>= 0), or IXGBE_ERR_INVALID_MAC_ADDR when
 *  every RAR entry is already in use.
 **/
int32_t ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, uint8_t *addr, uint32_t vmdq)
{
	static const uint32_t NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
	uint32_t first_empty_rar = NO_EMPTY_RAR_FOUND;
	uint32_t rar;
	uint32_t rar_low, rar_high;
	uint32_t addr_low, addr_high;

	DEBUGFUNC("ixgbe_insert_mac_addr_generic");

	/* swap bytes for HW little endian */
	addr_low  = addr[0] | (addr[1] << 8)
			    | (addr[2] << 16)
			    | (addr[3] << 24);
	addr_high = addr[4] | (addr[5] << 8);

	/*
	 * Either find the mac_id in rar or find the first empty space.
	 * rar_highwater points to just after the highest currently used
	 * rar in order to shorten the search.  It grows when we add a new
	 * rar to the top.
	 */
	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));

		/* remember the first disabled (AV clear) slot seen */
		if (((IXGBE_RAH_AV & rar_high) == 0)
		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
			first_empty_rar = rar;
		} else if ((rar_high & 0xFFFF) == addr_high) {
			/* upper 16 bits match; compare lower 32 too */
			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
			if (rar_low == addr_low)
				break;    /* found it already in the rars */
		}
	}

	if (rar < hw->mac.rar_highwater) {
		/* already there so just add to the pool bits */
		ixgbe_set_vmdq(hw, rar, vmdq);
	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
		/* stick it into first empty RAR slot we found */
		rar = first_empty_rar;
		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
	} else if (rar == hw->mac.rar_highwater) {
		/* add it to the top of the list and inc the highwater mark */
		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
		hw->mac.rar_highwater++;
	} else if (rar >= hw->mac.num_rar_entries) {
		return IXGBE_ERR_INVALID_MAC_ADDR;
	}

	/*
	 * If we found rar[0], make sure the default pool bit (we use pool 0)
	 * remains cleared to be sure default pool packets will get delivered
	 */
	if (rar == 0)
		ixgbe_clear_vmdq(hw, rar, 0);

	return rar;
}
3114 
3115 /**
3116  *  ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3117  *  @hw: pointer to hardware struct
3118  *  @rar: receive address register index to disassociate
3119  *  @vmdq: VMDq pool index to remove from the rar
3120  **/
ixgbe_clear_vmdq_generic(struct ixgbe_hw * hw,uint32_t rar,uint32_t vmdq)3121 int32_t ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
3122 {
3123 	uint32_t mpsar_lo, mpsar_hi;
3124 	uint32_t rar_entries = hw->mac.num_rar_entries;
3125 
3126 	DEBUGFUNC("ixgbe_clear_vmdq_generic");
3127 
3128 	/* Make sure we are using a valid rar index range */
3129 	if (rar >= rar_entries) {
3130 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3131 			     "RAR index %d is out of range.\n", rar);
3132 		return IXGBE_ERR_INVALID_ARGUMENT;
3133 	}
3134 
3135 	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3136 	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3137 
3138 	if (IXGBE_REMOVED(hw->hw_addr))
3139 		goto done;
3140 
3141 	if (!mpsar_lo && !mpsar_hi)
3142 		goto done;
3143 
3144 	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3145 		if (mpsar_lo) {
3146 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3147 			mpsar_lo = 0;
3148 		}
3149 		if (mpsar_hi) {
3150 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3151 			mpsar_hi = 0;
3152 		}
3153 	} else if (vmdq < 32) {
3154 		mpsar_lo &= ~(1 << vmdq);
3155 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3156 	} else {
3157 		mpsar_hi &= ~(1 << (vmdq - 32));
3158 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3159 	}
3160 
3161 	/* was that the last pool using this rar? */
3162 	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
3163 		hw->mac.ops.clear_rar(hw, rar);
3164 done:
3165 	return IXGBE_SUCCESS;
3166 }
3167 
3168 /**
3169  *  ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3170  *  @hw: pointer to hardware struct
3171  *  @rar: receive address register index to associate with a VMDq index
3172  *  @vmdq: VMDq pool index
3173  **/
ixgbe_set_vmdq_generic(struct ixgbe_hw * hw,uint32_t rar,uint32_t vmdq)3174 int32_t ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
3175 {
3176 	uint32_t mpsar;
3177 	uint32_t rar_entries = hw->mac.num_rar_entries;
3178 
3179 	DEBUGFUNC("ixgbe_set_vmdq_generic");
3180 
3181 	/* Make sure we are using a valid rar index range */
3182 	if (rar >= rar_entries) {
3183 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3184 			     "RAR index %d is out of range.\n", rar);
3185 		return IXGBE_ERR_INVALID_ARGUMENT;
3186 	}
3187 
3188 	if (vmdq < 32) {
3189 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3190 		mpsar |= 1 << vmdq;
3191 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3192 	} else {
3193 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3194 		mpsar |= 1 << (vmdq - 32);
3195 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3196 	}
3197 	return IXGBE_SUCCESS;
3198 }
3199 
3200 /**
3201  *  ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3202  *  @hw: pointer to hardware structure
3203  **/
ixgbe_init_uta_tables_generic(struct ixgbe_hw * hw)3204 int32_t ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3205 {
3206 	int i;
3207 
3208 	DEBUGFUNC("ixgbe_init_uta_tables_generic");
3209 	DEBUGOUT(" Clearing UTA\n");
3210 
3211 	for (i = 0; i < 128; i++)
3212 		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3213 
3214 	return IXGBE_SUCCESS;
3215 }
3216 
/**
 *  ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
 *  @hw: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter
 *  @vlvf_bypass: TRUE to find vlanid only, FALSE returns first empty slot if
 *		  vlanid not found
 *
 *  return the VLVF index where this VLAN id should be placed
 *
 *  Returns: the matching VLVF index, 0 for vlan 0, the first empty slot
 *  when none matched (and bypass is off), or IXGBE_ERR_NO_SPACE.  Note
 *  that with vlvf_bypass the "no match yet" sentinel is the (negative)
 *  error code itself, so the !first_empty_slot tests below are never true
 *  in that mode.
 **/
int32_t ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, uint32_t vlan, bool vlvf_bypass)
{
	int32_t regindex, first_empty_slot;
	uint32_t bits;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/* if vlvf_bypass is set we don't want to use an empty slot, we
	 * will simply bypass the VLVF if there are no entries present in the
	 * VLVF that contain our VLAN
	 */
	first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;

	/* add VLAN enable bit for comparison */
	vlan |= IXGBE_VLVF_VIEN;

	/* Search for the vlan id in the VLVF entries. Save off the first empty
	 * slot found along the way.
	 *
	 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
	 */
	for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
		if (bits == vlan)
			return regindex;
		if (!first_empty_slot && !bits)
			first_empty_slot = regindex;
	}

	/* If we are here then we didn't find the VLAN.  Return first empty
	 * slot we found during our search, else error.
	 */
	if (!first_empty_slot)
		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n");

	return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE;
}
3267 
3268 /**
3269  *  ixgbe_set_vfta_generic - Set VLAN filter table
3270  *  @hw: pointer to hardware structure
3271  *  @vlan: VLAN id to write to VLAN filter
3272  *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
3273  *  @vlan_on: boolean flag to turn on/off VLAN
3274  *  @vlvf_bypass: boolean flag indicating updating default pool is okay
3275  *
3276  *  Turn on/off specified VLAN in the VLAN filter table.
3277  **/
ixgbe_set_vfta_generic(struct ixgbe_hw * hw,uint32_t vlan,uint32_t vind,bool vlan_on,bool vlvf_bypass)3278 int32_t ixgbe_set_vfta_generic(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind,
3279 			       bool vlan_on, bool vlvf_bypass)
3280 {
3281 	uint32_t regidx, vfta_delta, vfta;
3282 	int32_t ret_val;
3283 
3284 	DEBUGFUNC("ixgbe_set_vfta_generic");
3285 
3286 	if (vlan > 4095 || vind > 63)
3287 		return IXGBE_ERR_PARAM;
3288 
3289 	/*
3290 	 * this is a 2 part operation - first the VFTA, then the
3291 	 * VLVF and VLVFB if VT Mode is set
3292 	 * We don't write the VFTA until we know the VLVF part succeeded.
3293 	 */
3294 
3295 	/* Part 1
3296 	 * The VFTA is a bitstring made up of 128 32-bit registers
3297 	 * that enable the particular VLAN id, much like the MTA:
3298 	 *    bits[11-5]: which register
3299 	 *    bits[4-0]:  which bit in the register
3300 	 */
3301 	regidx = vlan / 32;
3302 	vfta_delta = 1 << (vlan % 32);
3303 	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
3304 
3305 	/*
3306 	 * vfta_delta represents the difference between the current value
3307 	 * of vfta and the value we want in the register.  Since the diff
3308 	 * is an XOR mask we can just update the vfta using an XOR
3309 	 */
3310 	vfta_delta &= vlan_on ? ~vfta : vfta;
3311 	vfta ^= vfta_delta;
3312 
3313 	/* Part 2
3314 	 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
3315 	 */
3316 	ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta,
3317 					 vfta, vlvf_bypass);
3318 	if (ret_val != IXGBE_SUCCESS) {
3319 		if (vlvf_bypass)
3320 			goto vfta_update;
3321 		return ret_val;
3322 	}
3323 
3324 vfta_update:
3325 	/* Update VFTA now that we are ready for traffic */
3326 	if (vfta_delta)
3327 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);
3328 
3329 	return IXGBE_SUCCESS;
3330 }
3331 
3332 /**
3333  *  ixgbe_set_vlvf_generic - Set VLAN Pool Filter
3334  *  @hw: pointer to hardware structure
3335  *  @vlan: VLAN id to write to VLAN filter
3336  *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
3337  *  @vlan_on: boolean flag to turn on/off VLAN in VLVF
3338  *  @vfta_delta: pointer to the difference between the current value of VFTA
3339  *		 and the desired value
3340  *  @vfta: the desired value of the VFTA
3341  *  @vlvf_bypass: boolean flag indicating updating default pool is okay
3342  *
3343  *  Turn on/off specified bit in VLVF table.
3344  **/
ixgbe_set_vlvf_generic(struct ixgbe_hw * hw,uint32_t vlan,uint32_t vind,bool vlan_on,uint32_t * vfta_delta,uint32_t vfta,bool vlvf_bypass)3345 int32_t ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind,
3346 			       bool vlan_on, uint32_t *vfta_delta, uint32_t vfta,
3347 			       bool vlvf_bypass)
3348 {
3349 	uint32_t bits;
3350 	int32_t vlvf_index;
3351 
3352 	DEBUGFUNC("ixgbe_set_vlvf_generic");
3353 
3354 	if (vlan > 4095 || vind > 63)
3355 		return IXGBE_ERR_PARAM;
3356 
3357 	/* If VT Mode is set
3358 	 *   Either vlan_on
3359 	 *     make sure the vlan is in VLVF
3360 	 *     set the vind bit in the matching VLVFB
3361 	 *   Or !vlan_on
3362 	 *     clear the pool bit and possibly the vind
3363 	 */
3364 	if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
3365 		return IXGBE_SUCCESS;
3366 
3367 	vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
3368 	if (vlvf_index < 0)
3369 		return vlvf_index;
3370 
3371 	bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
3372 
3373 	/* set the pool bit */
3374 	bits |= 1 << (vind % 32);
3375 	if (vlan_on)
3376 		goto vlvf_update;
3377 
3378 	/* clear the pool bit */
3379 	bits ^= 1 << (vind % 32);
3380 
3381 	if (!bits &&
3382 	    !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
3383 		/* Clear VFTA first, then disable VLVF.  Otherwise
3384 		 * we run the risk of stray packets leaking into
3385 		 * the PF via the default pool
3386 		 */
3387 		if (*vfta_delta)
3388 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta);
3389 
3390 		/* disable VLVF and clear remaining bit from pool */
3391 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
3392 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);
3393 
3394 		return IXGBE_SUCCESS;
3395 	}
3396 
3397 	/* If there are still bits set in the VLVFB registers
3398 	 * for the VLAN ID indicated we need to see if the
3399 	 * caller is requesting that we clear the VFTA entry bit.
3400 	 * If the caller has requested that we clear the VFTA
3401 	 * entry bit but there are still pools/VFs using this VLAN
3402 	 * ID entry then ignore the request.  We're not worried
3403 	 * about the case where we're turning the VFTA VLAN ID
3404 	 * entry bit on, only when requested to turn it off as
3405 	 * there may be multiple pools and/or VFs using the
3406 	 * VLAN ID entry.  In that case we cannot clear the
3407 	 * VFTA bit until all pools/VFs using that VLAN ID have also
3408 	 * been cleared.  This will be indicated by "bits" being
3409 	 * zero.
3410 	 */
3411 	*vfta_delta = 0;
3412 
3413 vlvf_update:
3414 	/* record pool change and enable VLAN ID if not already enabled */
3415 	IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
3416 	IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);
3417 
3418 	return IXGBE_SUCCESS;
3419 }
3420 
3421 /**
3422  *  ixgbe_clear_vfta_generic - Clear VLAN filter table
3423  *  @hw: pointer to hardware structure
3424  *
3425  *  Clears the VLAN filer table, and the VMDq index associated with the filter
3426  **/
ixgbe_clear_vfta_generic(struct ixgbe_hw * hw)3427 int32_t ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3428 {
3429 	uint32_t offset;
3430 
3431 	DEBUGFUNC("ixgbe_clear_vfta_generic");
3432 
3433 	for (offset = 0; offset < hw->mac.vft_size; offset++)
3434 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
3435 
3436 	for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3437 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
3438 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
3439 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
3440 	}
3441 
3442 	return IXGBE_SUCCESS;
3443 }
3444 
3445 /**
3446  *  ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
3447  *  @hw: pointer to hardware structure
3448  *
3449  *  Contains the logic to identify if we need to verify link for the
3450  *  crosstalk fix
3451  **/
ixgbe_need_crosstalk_fix(struct ixgbe_hw * hw)3452 bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
3453 {
3454 
3455 	/* Does FW say we need the fix */
3456 	if (!hw->need_crosstalk_fix)
3457 		return FALSE;
3458 
3459 	/* Only consider SFP+ PHYs i.e. media type fiber */
3460 	switch (hw->mac.ops.get_media_type(hw)) {
3461 	case ixgbe_media_type_fiber:
3462 	case ixgbe_media_type_fiber_qsfp:
3463 		break;
3464 	default:
3465 		return FALSE;
3466 	}
3467 
3468 	return TRUE;
3469 }
3470 
/**
 *  ixgbe_check_mac_link_generic - Determine link and speed status
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @link_up: TRUE when link is up
 *  @link_up_wait_to_complete: bool used to wait for link up or not
 *
 *  Reads the links register to determine if link is up and the current
 *  speed.  Always returns IXGBE_SUCCESS; the results are reported through
 *  *speed and *link_up.
 **/
int32_t ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
				     bool *link_up, bool link_up_wait_to_complete)
{
	uint32_t links_reg, links_orig;
	uint32_t i;

	DEBUGFUNC("ixgbe_check_mac_link_generic");

	/* If Crosstalk fix enabled do the sanity check of making sure
	 * the SFP+ cage is full.
	 */
	if (ixgbe_need_crosstalk_fix(hw)) {
		uint32_t sfp_cage_full;

		/* the cage-presence SDP pin differs per MAC generation */
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
					IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
					IXGBE_ESDP_SDP0;
			break;
		default:
			/* sanity check - No SFP+ devices here */
			sfp_cage_full = FALSE;
			break;
		}

		/* empty cage: report link down without touching LINKS */
		if (!sfp_cage_full) {
			*link_up = FALSE;
			*speed = IXGBE_LINK_SPEED_UNKNOWN;
			return IXGBE_SUCCESS;
		}
	}

	/* clear the old state */
	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);

	if (links_orig != links_reg) {
		DEBUGOUT2("LINKS changed from %08X to %08X\n",
			  links_orig, links_reg);
	}

	if (link_up_wait_to_complete) {
		/* poll in 100ms steps up to the MAC's link-up budget */
		for (i = 0; i < hw->mac.max_link_up_time; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = TRUE;
				break;
			} else {
				*link_up = FALSE;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = TRUE;
		else
			*link_up = FALSE;
	}

	/* decode the speed field; NON_STD modifies the meaning on X550+ */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		if (hw->mac.type >= ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (hw->mac.type == ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_10_X550EM_A:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		/* only the 1G-T X550EM_A parts report a real 10Mb link */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
			*speed = IXGBE_LINK_SPEED_10_FULL;
		break;
	default:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
	}

	return IXGBE_SUCCESS;
}
3575 
3576 /**
3577  *  ixgbe_get_device_caps_generic - Get additional device capabilities
3578  *  @hw: pointer to hardware structure
3579  *  @device_caps: the EEPROM word with the extra device capabilities
3580  *
3581  *  This function will read the EEPROM location for the device capabilities,
3582  *  and return the word through device_caps.
3583  **/
ixgbe_get_device_caps_generic(struct ixgbe_hw * hw,uint16_t * device_caps)3584 int32_t ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, uint16_t *device_caps)
3585 {
3586 	DEBUGFUNC("ixgbe_get_device_caps_generic");
3587 
3588 	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
3589 
3590 	return IXGBE_SUCCESS;
3591 }
3592 
/**
 *  ixgbe_calculate_checksum - Calculate checksum for buffer
 *  @buffer: pointer to EEPROM (NULL yields a checksum of 0)
 *  @length: size of EEPROM to calculate a checksum for
 *
 *  Calculates the checksum for some buffer on a specified length: the
 *  two's complement of the byte sum, so that summing the buffer plus the
 *  checksum yields zero (mod 256).
 **/
uint8_t ixgbe_calculate_checksum(uint8_t *buffer, uint32_t length)
{
	uint8_t sum = 0;
	uint32_t i;

	DEBUGFUNC("ixgbe_calculate_checksum");

	if (buffer != NULL) {
		for (i = 0; i < length; i++)
			sum += buffer[i];
	}

	return (uint8_t)(0 - sum);
}
3615 
/**
 *  ixgbe_hic_unlocked - Issue command to manageability block unlocked
 *  @hw: pointer to the HW structure
 *  @buffer: command to write and where the return status will be placed
 *  @length: length of buffer, must be multiple of 4 bytes
 *  @timeout: time in ms to wait for command completion; 0 polls exactly
 *	      once with no completion-timeout failure
 *
 *  Communicates with the manageability block. On success return IXGBE_SUCCESS
 *  else returns semaphore error when encountering an error acquiring
 *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
 *
 *  This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
 *  by the caller.
 **/
int32_t ixgbe_hic_unlocked(struct ixgbe_hw *hw, uint32_t *buffer, uint32_t length,
		       uint32_t timeout)
{
	uint32_t hicr, i, fwsts;
	uint16_t dword_len;

	DEBUGFUNC("ixgbe_hic_unlocked");

	if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Set bit 9 of FWSTS clearing FW reset indication */
	fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
	IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);

	/* Check that the host interface is enabled. */
	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
	if (!(hicr & IXGBE_HICR_EN)) {
		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Calculate length in DWORDs. We must be DWORD aligned */
	if (length % sizeof(uint32_t)) {
		DEBUGOUT("Buffer length failure, not aligned to dword");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	dword_len = length >> 2;

	/* The device driver writes the relevant command block
	 * into the ram area.  FW expects little-endian dwords.
	 */
	for (i = 0; i < dword_len; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
				      i, htole32(buffer[i]));

	/* Setting this bit tells the ARC that a new command is pending. */
	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);

	/* Poll (1ms steps) for FW to clear the command bit */
	for (i = 0; i < timeout; i++) {
		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
		if (!(hicr & IXGBE_HICR_C))
			break;
		msec_delay(1);
	}

	/* Check command completion: either we ran out the timeout, or FW
	 * finished but did not raise the status-valid bit. */
	if ((timeout && i == timeout) ||
	    !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
		ERROR_REPORT1(IXGBE_ERROR_CAUTION,
			     "Command has failed with no status valid.\n");
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	return IXGBE_SUCCESS;
}
3689 
/**
 *  ixgbe_host_interface_command - Issue command to manageability block
 *  @hw: pointer to the HW structure
 *  @buffer: contains the command to write and where the return status will
 *   be placed
 *  @length: length of buffer, must be multiple of 4 bytes
 *  @timeout: time in ms to wait for command completion
 *  @return_data: read and return data from the buffer (TRUE) or not (FALSE)
 *   Needed because FW structures are big endian and decoding of
 *   these fields can be 8 bit or 16 bit based on command. Decoding
 *   is not easily understood without making a table of commands.
 *   So we will leave this up to the caller to read back the data
 *   in these cases.
 *
 *  Communicates with the manageability block. On success return IXGBE_SUCCESS
 *  else returns semaphore error when encountering an error acquiring
 *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
 **/
int32_t ixgbe_host_interface_command(struct ixgbe_hw *hw, uint32_t *buffer,
				 uint32_t length, uint32_t timeout, bool return_data)
{
	uint32_t hdr_size = sizeof(struct ixgbe_hic_hdr);
	struct ixgbe_hic_hdr *resp = (struct ixgbe_hic_hdr *)buffer;
	uint16_t buf_len;
	int32_t status;
	uint32_t bi;
	uint32_t dword_len;

	DEBUGFUNC("ixgbe_host_interface_command");

	/* Reject zero-length and oversized command buffers up front. */
	if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Take management host interface semaphore */
	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
	if (status)
		return status;

	/* Write the command and wait for completion while holding the
	 * semaphore; ixgbe_hic_unlocked() does the actual register work.
	 */
	status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
	if (status)
		goto rel_out;

	if (!return_data)
		goto rel_out;

	/* Calculate length in DWORDs */
	dword_len = hdr_size >> 2;

	/* first pull in the header so we know the buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		buffer[bi] = letoh32(IXGBE_READ_REG_ARRAY(hw,
		    IXGBE_FLEX_MNG, bi));
	}

	/*
	 * If there is any thing in data position pull it in
	 * Read Flash command requires reading buffer length from
	 * two byes instead of one byte
	 */
	if (resp->cmd == 0x30) {
		/* Pull two extra dwords so the wider (>8-bit) reply length
		 * used by this command is visible before decoding it below.
		 */
		for (; bi < dword_len + 2; bi++) {
			buffer[bi] = letoh32(IXGBE_READ_REG_ARRAY(hw,
			    IXGBE_FLEX_MNG, bi));
		}
		/* High length bits live in ret_status, low bits in buf_len. */
		buf_len = (((uint16_t)(resp->cmd_or_resp.ret_status) << 3)
				  & 0xF00) | resp->buf_len;
		hdr_size += (2 << 2);
	} else {
		buf_len = resp->buf_len;
	}
	/* Empty reply payload: header alone was enough. */
	if (!buf_len)
		goto rel_out;

	if (length < buf_len + hdr_size) {
		DEBUGOUT("Buffer not large enough for reply message.\n");
		status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto rel_out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/* Pull in the rest of the buffer (bi is where we left off).
	 * NOTE(review): <= matches the upstream shared code; bi already
	 * counts the header dword(s) consumed above while dword_len counts
	 * payload dwords only.
	 */
	for (; bi <= dword_len; bi++) {
		buffer[bi] = letoh32(IXGBE_READ_REG_ARRAY(hw,
		    IXGBE_FLEX_MNG, bi));
	}

rel_out:
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);

	return status;
}
3785 
/**
 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
 * @hw: pointer to the hardware structure
 *
 * The 82599 and x540 MACs can experience issues if TX work is still pending
 * when a reset occurs.  This function prevents this by flushing the PCIe
 * buffers on the system.
 **/
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
{
	uint32_t gcr_ext, hlreg0, i, poll;
	uint16_t value;

	/*
	 * If double reset is not requested then all transactions should
	 * already be clear and as such there is no work to do
	 */
	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
		return;

	/*
	 * Set loopback enable to prevent any transmits from being sent
	 * should the link come up.  This assumes that the RXCTRL.RXEN bit
	 * has already been cleared.
	 */
	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);

	/* Wait for a last completion before clearing buffers */
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(3);

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usec_delay(100);
		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
		/* Stop polling if the adapter was surprise-removed. */
		if (IXGBE_REMOVED(hw->hw_addr))
			goto out;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}
	/* Poll timed out: fall through and clear the buffers anyway. */

out:
	/* initiate cleaning flow for buffers in the PCIe transaction layer */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);

	/* Flush all writes and allow 20usec for all transactions to clear */
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(20);

	/* restore previous register values */
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
}
3846 
ixgbe_disable_rx_generic(struct ixgbe_hw * hw)3847 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
3848 {
3849 	uint32_t pfdtxgswc;
3850 	uint32_t rxctrl;
3851 
3852 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3853 	if (rxctrl & IXGBE_RXCTRL_RXEN) {
3854 		if (hw->mac.type != ixgbe_mac_82598EB) {
3855 			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
3856 			if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
3857 				pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
3858 				IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
3859 				hw->mac.set_lben = TRUE;
3860 			} else {
3861 				hw->mac.set_lben = FALSE;
3862 			}
3863 		}
3864 		rxctrl &= ~IXGBE_RXCTRL_RXEN;
3865 		IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
3866 	}
3867 }
3868 
ixgbe_enable_rx_generic(struct ixgbe_hw * hw)3869 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
3870 {
3871 	uint32_t pfdtxgswc;
3872 	uint32_t rxctrl;
3873 
3874 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3875 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
3876 
3877 	if (hw->mac.type != ixgbe_mac_82598EB) {
3878 		if (hw->mac.set_lben) {
3879 			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
3880 			pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
3881 			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
3882 			hw->mac.set_lben = FALSE;
3883 		}
3884 	}
3885 }
3886 
3887 /**
3888  * ixgbe_mng_present - returns TRUE when management capability is present
3889  * @hw: pointer to hardware structure
3890  */
ixgbe_mng_present(struct ixgbe_hw * hw)3891 bool ixgbe_mng_present(struct ixgbe_hw *hw)
3892 {
3893 	uint32_t fwsm;
3894 
3895 	if (hw->mac.type < ixgbe_mac_82599EB)
3896 		return FALSE;
3897 
3898 	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
3899 
3900 	return !!(fwsm & IXGBE_FWSM_FW_MODE_PT);
3901 }
3902 
3903 /**
3904  * ixgbe_mng_enabled - Is the manageability engine enabled?
3905  * @hw: pointer to hardware structure
3906  *
3907  * Returns TRUE if the manageability engine is enabled.
3908  **/
ixgbe_mng_enabled(struct ixgbe_hw * hw)3909 bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
3910 {
3911 	uint32_t fwsm, manc, factps;
3912 
3913 	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
3914 	if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
3915 		return FALSE;
3916 
3917 	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
3918 	if (!(manc & IXGBE_MANC_RCV_TCO_EN))
3919 		return FALSE;
3920 
3921 	if (hw->mac.type <= ixgbe_mac_X540) {
3922 		factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
3923 		if (factps & IXGBE_FACTPS_MNGCG)
3924 			return FALSE;
3925 	}
3926 
3927 	return TRUE;
3928 }
3929 
/**
 *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 *  Set the link speed in the MAC and/or PHY register and restarts link.
 *  Tries 10G first, then 1G; if neither links up and more than one speed
 *  was attempted, recurses once with only the highest requested speed.
 **/
int32_t ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
					      ixgbe_link_speed speed,
					      bool autoneg_wait_to_complete)
{
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	int32_t status = IXGBE_SUCCESS;
	uint32_t speedcnt = 0;
	uint32_t i = 0;
	bool autoneg, link_up = FALSE;

	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Mask off requested but non-supported speeds */
	status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg);
	if (status != IXGBE_SUCCESS)
		return status;

	speed &= link_speed;

	/* Try each speed one by one, highest priority first.  We do this in
	 * software because 10Gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber_fixed:
		case ixgbe_media_type_fiber:
			/* set_rate_select_speed is optional per-MAC */
			if (hw->mac.ops.set_rate_select_speed)
				hw->mac.ops.set_rate_select_speed(hw,
				    IXGBE_LINK_SPEED_10GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects MAC link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		if (!hw->mac.ops.setup_mac_link)
			return IXGBE_NOT_IMPLEMENTED;
		status = hw->mac.ops.setup_mac_link(hw,
						    IXGBE_LINK_SPEED_10GB_FULL,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted.  82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed,
						  &link_up, FALSE);
			if (status != IXGBE_SUCCESS)
				return status;

			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		/* Track the best speed attempted for the retry below. */
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber_fixed:
		case ixgbe_media_type_fiber:
			if (hw->mac.ops.set_rate_select_speed)
				hw->mac.ops.set_rate_select_speed(hw,
				    IXGBE_LINK_SPEED_1GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		if (!hw->mac.ops.setup_mac_link)
			return IXGBE_NOT_IMPLEMENTED;
		status = hw->mac.ops.setup_mac_link(hw,
						    IXGBE_LINK_SPEED_1GB_FULL,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Configure back to the highest speed we tried,
	 * (if there was more than one).  We call ourselves back with just the
	 * single highest speed that the user requested.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
						      highest_link_speed,
						      autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}
4082 
4083 /**
4084  *  ixgbe_set_soft_rate_select_speed - Set module link speed
4085  *  @hw: pointer to hardware structure
4086  *  @speed: link speed to set
4087  *
4088  *  Set module link speed via the soft rate select.
4089  */
ixgbe_set_soft_rate_select_speed(struct ixgbe_hw * hw,ixgbe_link_speed speed)4090 void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
4091 					ixgbe_link_speed speed)
4092 {
4093 	int32_t status;
4094 	uint8_t rs, eeprom_data;
4095 
4096 	switch (speed) {
4097 	case IXGBE_LINK_SPEED_10GB_FULL:
4098 		/* one bit mask same as setting on */
4099 		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
4100 		break;
4101 	case IXGBE_LINK_SPEED_1GB_FULL:
4102 		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
4103 		break;
4104 	default:
4105 		DEBUGOUT("Invalid fixed module speed\n");
4106 		return;
4107 	}
4108 
4109 	/* Set RS0 */
4110 	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
4111 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
4112 					   &eeprom_data);
4113 	if (status) {
4114 		DEBUGOUT("Failed to read Rx Rate Select RS0\n");
4115 		goto out;
4116 	}
4117 
4118 	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
4119 
4120 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
4121 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
4122 					    eeprom_data);
4123 	if (status) {
4124 		DEBUGOUT("Failed to write Rx Rate Select RS0\n");
4125 		goto out;
4126 	}
4127 
4128 	/* Set RS1 */
4129 	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
4130 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
4131 					   &eeprom_data);
4132 	if (status) {
4133 		DEBUGOUT("Failed to read Rx Rate Select RS1\n");
4134 		goto out;
4135 	}
4136 
4137 	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
4138 
4139 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
4140 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
4141 					    eeprom_data);
4142 	if (status) {
4143 		DEBUGOUT("Failed to write Rx Rate Select RS1\n");
4144 		goto out;
4145 	}
4146 out:
4147 	return;
4148 }
4149 
4150 /* MAC Operations */
4151 
4152 /**
4153  *  ixgbe_init_shared_code - Initialize the shared code
4154  *  @hw: pointer to hardware structure
4155  *
4156  *  This will assign function pointers and assign the MAC type and PHY code.
4157  *  Does not touch the hardware. This function must be called prior to any
4158  *  other function in the shared code. The ixgbe_hw structure should be
4159  *  memset to 0 prior to calling this function.  The following fields in
4160  *  hw structure should be filled in prior to calling this function:
4161  *  hw_addr, back, device_id, vendor_id, subsystem_device_id,
4162  *  subsystem_vendor_id, and revision_id
4163  **/
ixgbe_init_shared_code(struct ixgbe_hw * hw)4164 int32_t ixgbe_init_shared_code(struct ixgbe_hw *hw)
4165 {
4166 	int32_t status;
4167 
4168 	DEBUGFUNC("ixgbe_init_shared_code");
4169 
4170 	/*
4171 	 * Set the mac type
4172 	 */
4173 	ixgbe_set_mac_type(hw);
4174 
4175 	switch (hw->mac.type) {
4176 	case ixgbe_mac_82598EB:
4177 		status = ixgbe_init_ops_82598(hw);
4178 		break;
4179 	case ixgbe_mac_82599EB:
4180 		status = ixgbe_init_ops_82599(hw);
4181 		break;
4182 	case ixgbe_mac_X540:
4183 		status = ixgbe_init_ops_X540(hw);
4184 		break;
4185 	case ixgbe_mac_X550:
4186 		status = ixgbe_init_ops_X550(hw);
4187 		break;
4188 	case ixgbe_mac_X550EM_x:
4189 		status = ixgbe_init_ops_X550EM_x(hw);
4190 		break;
4191 	case ixgbe_mac_X550EM_a:
4192 		status = ixgbe_init_ops_X550EM_a(hw);
4193 		break;
4194 	default:
4195 		status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
4196 		break;
4197 	}
4198 	hw->mac.max_link_up_time = IXGBE_LINK_UP_TIME;
4199 
4200 	return status;
4201 }
4202 
/**
 *  ixgbe_set_mac_type - Sets MAC type
 *  @hw: pointer to the HW structure
 *
 *  This function sets the mac type of the adapter based on the
 *  vendor ID and device ID stored in the hw structure.  It also selects
 *  the matching register-value table (hw->mvals) for the MAC family.
 **/
int32_t ixgbe_set_mac_type(struct ixgbe_hw *hw)
{
	int32_t ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_set_mac_type\n");

	/* Only Intel parts are handled by this shared code. */
	if (hw->vendor_id != IXGBE_INTEL_VENDOR_ID) {
		ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
			     "Unsupported vendor id: %x", hw->vendor_id);
		return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
	}

	/* Default register values; overridden per family below. */
	hw->mvals = ixgbe_mvals_base;

	switch (hw->device_id) {
	/* 82598 family */
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82598_BX:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AT:
	case IXGBE_DEV_ID_82598AT2:
	case IXGBE_DEV_ID_82598AT_DUAL_PORT:
	case IXGBE_DEV_ID_82598EB_CX4:
	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
	case IXGBE_DEV_ID_82598EB_XF_LR:
	case IXGBE_DEV_ID_82598EB_SFP_LOM:
		hw->mac.type = ixgbe_mac_82598EB;
		break;
	/* 82599 family */
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_82599_XAUI_LOM:
	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
	case IXGBE_DEV_ID_82599_KR:
	case IXGBE_DEV_ID_82599_SFP:
	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
	case IXGBE_DEV_ID_82599_SFP_FCOE:
	case IXGBE_DEV_ID_82599_SFP_EM:
	case IXGBE_DEV_ID_82599_SFP_SF2:
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
	case IXGBE_DEV_ID_82599EN_SFP:
	case IXGBE_DEV_ID_82599_CX4:
	case IXGBE_DEV_ID_82599_BYPASS:
	case IXGBE_DEV_ID_82599_T3_LOM:
		hw->mac.type = ixgbe_mac_82599EB;
		break;
	/* X540 family */
	case IXGBE_DEV_ID_X540T:
	case IXGBE_DEV_ID_X540T1:
	case IXGBE_DEV_ID_X540_BYPASS:
		hw->mac.type = ixgbe_mac_X540;
		hw->mvals = ixgbe_mvals_X540;
		break;
	/* X550 family */
	case IXGBE_DEV_ID_X550T:
	case IXGBE_DEV_ID_X550T1:
		hw->mac.type = ixgbe_mac_X550;
		hw->mvals = ixgbe_mvals_X550;
		break;
	/* X550EM-x family */
	case IXGBE_DEV_ID_X550EM_X_KX4:
	case IXGBE_DEV_ID_X550EM_X_KR:
	case IXGBE_DEV_ID_X550EM_X_10G_T:
	case IXGBE_DEV_ID_X550EM_X_1G_T:
	case IXGBE_DEV_ID_X550EM_X_SFP:
	case IXGBE_DEV_ID_X550EM_X_XFI:
		hw->mac.type = ixgbe_mac_X550EM_x;
		hw->mvals = ixgbe_mvals_X550EM_x;
		break;
	/* X550EM-a family */
	case IXGBE_DEV_ID_X550EM_A_KR:
	case IXGBE_DEV_ID_X550EM_A_KR_L:
	case IXGBE_DEV_ID_X550EM_A_SFP_N:
	case IXGBE_DEV_ID_X550EM_A_SGMII:
	case IXGBE_DEV_ID_X550EM_A_SGMII_L:
	case IXGBE_DEV_ID_X550EM_A_1G_T:
	case IXGBE_DEV_ID_X550EM_A_1G_T_L:
	case IXGBE_DEV_ID_X550EM_A_10G_T:
	case IXGBE_DEV_ID_X550EM_A_QSFP:
	case IXGBE_DEV_ID_X550EM_A_QSFP_N:
	case IXGBE_DEV_ID_X550EM_A_SFP:
		hw->mac.type = ixgbe_mac_X550EM_a;
		hw->mvals = ixgbe_mvals_X550EM_a;
		break;
	default:
		ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
		ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
			     "Unsupported device id: %x",
			     hw->device_id);
		break;
	}

	DEBUGOUT2("ixgbe_set_mac_type found mac: %d, returns: %d\n",
		  hw->mac.type, ret_val);
	return ret_val;
}
4304 
4305 /**
4306  *  ixgbe_init_hw - Initialize the hardware
4307  *  @hw: pointer to hardware structure
4308  *
4309  *  Initialize the hardware by resetting and then starting the hardware
4310  **/
ixgbe_init_hw(struct ixgbe_hw * hw)4311 int32_t ixgbe_init_hw(struct ixgbe_hw *hw)
4312 {
4313 	if (hw->mac.ops.init_hw)
4314 		return hw->mac.ops.init_hw(hw);
4315 	else
4316 		return IXGBE_NOT_IMPLEMENTED;
4317 }
4318 
4319 /**
4320  *  ixgbe_get_media_type - Get media type
4321  *  @hw: pointer to hardware structure
4322  *
4323  *  Returns the media type (fiber, copper, backplane)
4324  **/
ixgbe_get_media_type(struct ixgbe_hw * hw)4325 enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw)
4326 {
4327 	if (hw->mac.ops.get_media_type)
4328 		return hw->mac.ops.get_media_type(hw);
4329 	else
4330 		return ixgbe_media_type_unknown;
4331 }
4332 
4333 /**
4334  *  ixgbe_identify_phy - Get PHY type
4335  *  @hw: pointer to hardware structure
4336  *
4337  *  Determines the physical layer module found on the current adapter.
4338  **/
ixgbe_identify_phy(struct ixgbe_hw * hw)4339 int32_t ixgbe_identify_phy(struct ixgbe_hw *hw)
4340 {
4341 	int32_t status = IXGBE_SUCCESS;
4342 
4343 	if (hw->phy.type == ixgbe_phy_unknown) {
4344 		if (hw->phy.ops.identify)
4345 			status = hw->phy.ops.identify(hw);
4346 		else
4347 			status = IXGBE_NOT_IMPLEMENTED;
4348 	}
4349 
4350 	return status;
4351 }
4352 
4353 /**
4354  *  ixgbe_check_link - Get link and speed status
4355  *  @hw: pointer to hardware structure
4356  *  @speed: pointer to link speed
4357  *  @link_up: TRUE when link is up
4358  *  @link_up_wait_to_complete: bool used to wait for link up or not
4359  *
4360  *  Reads the links register to determine if link is up and the current speed
4361  **/
ixgbe_check_link(struct ixgbe_hw * hw,ixgbe_link_speed * speed,bool * link_up,bool link_up_wait_to_complete)4362 int32_t ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4363 			 bool *link_up, bool link_up_wait_to_complete)
4364 {
4365 	if (hw->mac.ops.check_link)
4366 		return hw->mac.ops.check_link(hw, speed, link_up,
4367 					      link_up_wait_to_complete);
4368 	else
4369 		return IXGBE_NOT_IMPLEMENTED;
4370 }
4371 
4372 /**
4373  *  ixgbe_flap_tx_laser - flap Tx laser to start autotry process
4374  *  @hw: pointer to hardware structure
4375  *
4376  *  When the driver changes the link speeds that it can support then
4377  *  flap the tx laser to alert the link partner to start autotry
4378  *  process on its end.
4379  **/
ixgbe_flap_tx_laser(struct ixgbe_hw * hw)4380 void ixgbe_flap_tx_laser(struct ixgbe_hw *hw)
4381 {
4382 	if (hw->mac.ops.flap_tx_laser)
4383 		hw->mac.ops.flap_tx_laser(hw);
4384 }
4385 
4386 /**
4387  *  ixgbe_set_rar - Set Rx address register
4388  *  @hw: pointer to hardware structure
4389  *  @index: Receive address register to write
4390  *  @addr: Address to put into receive address register
4391  *  @vmdq: VMDq "set"
4392  *  @enable_addr: set flag that address is active
4393  *
4394  *  Puts an ethernet address into a receive address register.
4395  **/
ixgbe_set_rar(struct ixgbe_hw * hw,uint32_t index,uint8_t * addr,uint32_t vmdq,uint32_t enable_addr)4396 int32_t ixgbe_set_rar(struct ixgbe_hw *hw, uint32_t index, uint8_t *addr,
4397 		      uint32_t vmdq, uint32_t enable_addr)
4398 {
4399 	if (hw->mac.ops.set_rar)
4400 		return hw->mac.ops.set_rar(hw, index, addr, vmdq, enable_addr);
4401 	else
4402 		return IXGBE_NOT_IMPLEMENTED;
4403 }
4404 
4405 /**
4406  *  ixgbe_set_vmdq - Associate a VMDq index with a receive address
4407  *  @hw: pointer to hardware structure
4408  *  @rar: receive address register index to associate with VMDq index
4409  *  @vmdq: VMDq set or pool index
4410  **/
ixgbe_set_vmdq(struct ixgbe_hw * hw,uint32_t rar,uint32_t vmdq)4411 int32_t ixgbe_set_vmdq(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
4412 {
4413 	if (hw->mac.ops.set_vmdq)
4414 		return hw->mac.ops.set_vmdq(hw, rar, vmdq);
4415 	else
4416 		return IXGBE_NOT_IMPLEMENTED;
4417 }
4418 
4419 /**
4420  *  ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address
4421  *  @hw: pointer to hardware structure
4422  *  @rar: receive address register index to disassociate with VMDq index
4423  *  @vmdq: VMDq set or pool index
4424  **/
ixgbe_clear_vmdq(struct ixgbe_hw * hw,uint32_t rar,uint32_t vmdq)4425 int32_t ixgbe_clear_vmdq(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
4426 {
4427 	if (hw->mac.ops.clear_vmdq)
4428 		return hw->mac.ops.clear_vmdq(hw, rar, vmdq);
4429 	else
4430 		return IXGBE_NOT_IMPLEMENTED;
4431 }
4432 
4433 /**
4434  *  ixgbe_init_uta_tables - Initializes Unicast Table Arrays.
4435  *  @hw: pointer to hardware structure
4436  *
4437  *  Initializes the Unicast Table Arrays to zero on device load.  This
4438  *  is part of the Rx init addr execution path.
4439  **/
ixgbe_init_uta_tables(struct ixgbe_hw * hw)4440 int32_t ixgbe_init_uta_tables(struct ixgbe_hw *hw)
4441 {
4442 	if (hw->mac.ops.init_uta_tables)
4443 		return hw->mac.ops.init_uta_tables(hw);
4444 	else
4445 		return IXGBE_NOT_IMPLEMENTED;
4446 }
4447 
ixgbe_disable_rx(struct ixgbe_hw * hw)4448 void ixgbe_disable_rx(struct ixgbe_hw *hw)
4449 {
4450 	if (hw->mac.ops.disable_rx)
4451 		hw->mac.ops.disable_rx(hw);
4452 }
4453 
ixgbe_enable_rx(struct ixgbe_hw * hw)4454 void ixgbe_enable_rx(struct ixgbe_hw *hw)
4455 {
4456 	if (hw->mac.ops.enable_rx)
4457 		hw->mac.ops.enable_rx(hw);
4458 }
4459 
4460 /*
4461  * MBX: Mailbox handling
4462  */
4463 
4464 /**
4465  *  ixgbe_read_mbx - Reads a message from the mailbox
4466  *  @hw: pointer to the HW structure
4467  *  @msg: The message buffer
4468  *  @size: Length of buffer
4469  *  @mbx_id: id of mailbox to read
4470  *
4471  *  returns SUCCESS if it successfully read message from buffer
4472  **/
ixgbe_read_mbx(struct ixgbe_hw * hw,uint32_t * msg,uint16_t size,uint16_t mbx_id)4473 int32_t ixgbe_read_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16_t mbx_id)
4474 {
4475 	struct ixgbe_mbx_info *mbx = &hw->mbx;
4476 	int32_t ret_val = IXGBE_ERR_MBX;
4477 
4478 	DEBUGFUNC("ixgbe_read_mbx");
4479 
4480 	/* limit read to size of mailbox */
4481 	if (size > mbx->size)
4482 		size = mbx->size;
4483 
4484 	if (mbx->ops.read)
4485 		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
4486 
4487 	return ret_val;
4488 }
4489 
4490 /**
4491  *  ixgbe_write_mbx - Write a message to the mailbox
4492  *  @hw: pointer to the HW structure
4493  *  @msg: The message buffer
4494  *  @size: Length of buffer
4495  *  @mbx_id: id of mailbox to write
4496  *
4497  *  returns SUCCESS if it successfully copied message into the buffer
4498  **/
ixgbe_write_mbx(struct ixgbe_hw * hw,uint32_t * msg,uint16_t size,uint16_t mbx_id)4499 int32_t ixgbe_write_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16_t mbx_id)
4500 {
4501 	struct ixgbe_mbx_info *mbx = &hw->mbx;
4502 	int32_t ret_val = IXGBE_SUCCESS;
4503 
4504 	DEBUGFUNC("ixgbe_write_mbx");
4505 
4506 	if (size > mbx->size)
4507 		ret_val = IXGBE_ERR_MBX;
4508 
4509 	else if (mbx->ops.write)
4510 		ret_val = mbx->ops.write(hw, msg, size, mbx_id);
4511 
4512 	return ret_val;
4513 }
4514 
4515 /**
4516  *  ixgbe_check_for_msg - checks to see if someone sent us mail
4517  *  @hw: pointer to the HW structure
4518  *  @mbx_id: id of mailbox to check
4519  *
4520  *  returns SUCCESS if the Status bit was found or else ERR_MBX
4521  **/
ixgbe_check_for_msg(struct ixgbe_hw * hw,uint16_t mbx_id)4522 int32_t ixgbe_check_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id)
4523 {
4524 	struct ixgbe_mbx_info *mbx = &hw->mbx;
4525 	int32_t ret_val = IXGBE_ERR_MBX;
4526 
4527 	DEBUGFUNC("ixgbe_check_for_msg");
4528 
4529 	if (mbx->ops.check_for_msg)
4530 		ret_val = mbx->ops.check_for_msg(hw, mbx_id);
4531 
4532 	return ret_val;
4533 }
4534 
4535 /**
4536  *  ixgbe_check_for_ack - checks to see if someone sent us ACK
4537  *  @hw: pointer to the HW structure
4538  *  @mbx_id: id of mailbox to check
4539  *
4540  *  returns SUCCESS if the Status bit was found or else ERR_MBX
4541  **/
ixgbe_check_for_ack(struct ixgbe_hw * hw,uint16_t mbx_id)4542 int32_t ixgbe_check_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id)
4543 {
4544 	struct ixgbe_mbx_info *mbx = &hw->mbx;
4545 	int32_t ret_val = IXGBE_ERR_MBX;
4546 
4547 	DEBUGFUNC("ixgbe_check_for_ack");
4548 
4549 	if (mbx->ops.check_for_ack)
4550 		ret_val = mbx->ops.check_for_ack(hw, mbx_id);
4551 
4552 	return ret_val;
4553 }
4554 
4555 /**
4556  *  ixgbe_check_for_rst - checks to see if other side has reset
4557  *  @hw: pointer to the HW structure
4558  *  @mbx_id: id of mailbox to check
4559  *
4560  *  returns SUCCESS if the Status bit was found or else ERR_MBX
4561  **/
ixgbe_check_for_rst(struct ixgbe_hw * hw,uint16_t mbx_id)4562 int32_t ixgbe_check_for_rst(struct ixgbe_hw *hw, uint16_t mbx_id)
4563 {
4564 	struct ixgbe_mbx_info *mbx = &hw->mbx;
4565 	int32_t ret_val = IXGBE_ERR_MBX;
4566 
4567 	DEBUGFUNC("ixgbe_check_for_rst");
4568 
4569 	if (mbx->ops.check_for_rst)
4570 		ret_val = mbx->ops.check_for_rst(hw, mbx_id);
4571 
4572 	return ret_val;
4573 }
4574 
4575 /**
4576  *  ixgbe_poll_for_msg - Wait for message notification
4577  *  @hw: pointer to the HW structure
4578  *  @mbx_id: id of mailbox to write
4579  *
4580  *  returns SUCCESS if it successfully received a message notification
4581  **/
ixgbe_poll_for_msg(struct ixgbe_hw * hw,uint16_t mbx_id)4582 int32_t ixgbe_poll_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id)
4583 {
4584 	struct ixgbe_mbx_info *mbx = &hw->mbx;
4585 	int countdown = mbx->timeout;
4586 
4587 	DEBUGFUNC("ixgbe_poll_for_msg");
4588 
4589 	if (!countdown || !mbx->ops.check_for_msg)
4590 		goto out;
4591 
4592 	while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
4593 		countdown--;
4594 		if (!countdown)
4595 			break;
4596 		usec_delay(mbx->usec_delay);
4597 	}
4598 
4599 	if (countdown == 0)
4600 		ERROR_REPORT2(IXGBE_ERROR_POLLING,
4601 			   "Polling for VF%d mailbox message timedout", mbx_id);
4602 
4603 out:
4604 	return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
4605 }
4606 
4607 /**
4608  *  ixgbe_poll_for_ack - Wait for message acknowledgement
4609  *  @hw: pointer to the HW structure
4610  *  @mbx_id: id of mailbox to write
4611  *
4612  *  returns SUCCESS if it successfully received a message acknowledgement
4613  **/
ixgbe_poll_for_ack(struct ixgbe_hw * hw,uint16_t mbx_id)4614 int32_t ixgbe_poll_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id)
4615 {
4616 	struct ixgbe_mbx_info *mbx = &hw->mbx;
4617 	int countdown = mbx->timeout;
4618 
4619 	DEBUGFUNC("ixgbe_poll_for_ack");
4620 
4621 	if (!countdown || !mbx->ops.check_for_ack)
4622 		goto out;
4623 
4624 	while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
4625 		countdown--;
4626 		if (!countdown)
4627 			break;
4628 		usec_delay(mbx->usec_delay);
4629 	}
4630 
4631 	if (countdown == 0)
4632 		ERROR_REPORT2(IXGBE_ERROR_POLLING,
4633 			     "Polling for VF%d mailbox ack timedout", mbx_id);
4634 
4635 out:
4636 	return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
4637 }
4638 
4639 /**
4640  *  ixgbe_read_posted_mbx - Wait for message notification and receive message
4641  *  @hw: pointer to the HW structure
4642  *  @msg: The message buffer
4643  *  @size: Length of buffer
4644  *  @mbx_id: id of mailbox to write
4645  *
4646  *  returns SUCCESS if it successfully received a message notification and
4647  *  copied it into the receive buffer.
4648  **/
ixgbe_read_posted_mbx(struct ixgbe_hw * hw,uint32_t * msg,uint16_t size,uint16_t mbx_id)4649 int32_t ixgbe_read_posted_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16_t mbx_id)
4650 {
4651 	struct ixgbe_mbx_info *mbx = &hw->mbx;
4652 	int32_t ret_val = IXGBE_ERR_MBX;
4653 
4654 	DEBUGFUNC("ixgbe_read_posted_mbx");
4655 
4656 	if (!mbx->ops.read)
4657 		goto out;
4658 
4659 	ret_val = ixgbe_poll_for_msg(hw, mbx_id);
4660 
4661 	/* if ack received read message, otherwise we timed out */
4662 	if (!ret_val)
4663 		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
4664 out:
4665 	return ret_val;
4666 }
4667 
4668 /**
4669  *  ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
4670  *  @hw: pointer to the HW structure
4671  *  @msg: The message buffer
4672  *  @size: Length of buffer
4673  *  @mbx_id: id of mailbox to write
4674  *
4675  *  returns SUCCESS if it successfully copied message into the buffer and
4676  *  received an ack to that message within delay * timeout period
4677  **/
ixgbe_write_posted_mbx(struct ixgbe_hw * hw,uint32_t * msg,uint16_t size,uint16_t mbx_id)4678 int32_t ixgbe_write_posted_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
4679 			   uint16_t mbx_id)
4680 {
4681 	struct ixgbe_mbx_info *mbx = &hw->mbx;
4682 	int32_t ret_val = IXGBE_ERR_MBX;
4683 
4684 	DEBUGFUNC("ixgbe_write_posted_mbx");
4685 
4686 	/* exit if either we can't write or there isn't a defined timeout */
4687 	if (!mbx->ops.write || !mbx->timeout)
4688 		goto out;
4689 
4690 	/* send msg */
4691 	ret_val = mbx->ops.write(hw, msg, size, mbx_id);
4692 
4693 	/* if msg sent wait until we receive an ack */
4694 	if (!ret_val)
4695 		ret_val = ixgbe_poll_for_ack(hw, mbx_id);
4696 out:
4697 	return ret_val;
4698 }
4699 
4700 /**
4701  *  ixgbe_init_mbx_ops_generic - Initialize MB function pointers
4702  *  @hw: pointer to the HW structure
4703  *
4704  *  Setups up the mailbox read and write message function pointers
4705  **/
ixgbe_init_mbx_ops_generic(struct ixgbe_hw * hw)4706 void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
4707 {
4708 	struct ixgbe_mbx_info *mbx = &hw->mbx;
4709 
4710 	mbx->ops.read_posted = ixgbe_read_posted_mbx;
4711 	mbx->ops.write_posted = ixgbe_write_posted_mbx;
4712 }
4713 
4714 /**
4715  *  ixgbe_read_v2p_mailbox - read v2p mailbox
4716  *  @hw: pointer to the HW structure
4717  *
4718  *  This function is used to read the v2p mailbox without losing the read to
4719  *  clear status bits.
4720  **/
ixgbe_read_v2p_mailbox(struct ixgbe_hw * hw)4721 uint32_t ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw)
4722 {
4723 	uint32_t v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
4724 
4725 	v2p_mailbox |= hw->mbx.v2p_mailbox;
4726 	hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
4727 
4728 	return v2p_mailbox;
4729 }
4730 
/* Test mask against MBVFICR[index]; clear the bits and return SUCCESS if set. */
int32_t ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, uint32_t mask, int32_t index)
{
	uint32_t mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));

	if ((mbvficr & mask) == 0)
		return IXGBE_ERR_MBX;

	/* MBVFICR is write-1-to-clear: acknowledge the bits we saw */
	IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
	return IXGBE_SUCCESS;
}
4743 
4744 /**
4745  *  ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
4746  *  @hw: pointer to the HW structure
4747  *  @vf_number: the VF index
4748  *
 *  returns SUCCESS if the VF has set the Request bit or else ERR_MBX
4750  **/
ixgbe_check_for_msg_pf(struct ixgbe_hw * hw,uint16_t vf_number)4751 int32_t ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, uint16_t vf_number)
4752 {
4753 	int32_t ret_val = IXGBE_ERR_MBX;
4754 	int32_t index = IXGBE_MBVFICR_INDEX(vf_number);
4755 	uint32_t vf_bit = vf_number % 16;
4756 
4757 	DEBUGFUNC("ixgbe_check_for_msg_pf");
4758 
4759 	if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
4760 				    index)) {
4761 		ret_val = IXGBE_SUCCESS;
4762 		hw->mbx.stats.reqs++;
4763 	}
4764 
4765 	return ret_val;
4766 }
4767 
4768 /**
4769  *  ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
4770  *  @hw: pointer to the HW structure
4771  *  @vf_number: the VF index
4772  *
4773  *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
4774  **/
ixgbe_check_for_ack_pf(struct ixgbe_hw * hw,uint16_t vf_number)4775 int32_t ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, uint16_t vf_number)
4776 {
4777 	int32_t ret_val = IXGBE_ERR_MBX;
4778 	int32_t index = IXGBE_MBVFICR_INDEX(vf_number);
4779 	uint32_t vf_bit = vf_number % 16;
4780 
4781 	DEBUGFUNC("ixgbe_check_for_ack_pf");
4782 
4783 	if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
4784 				    index)) {
4785 		ret_val = IXGBE_SUCCESS;
4786 		hw->mbx.stats.acks++;
4787 	}
4788 
4789 	return ret_val;
4790 }
4791 
4792 /**
4793  *  ixgbe_check_for_rst_pf - checks to see if the VF has reset
4794  *  @hw: pointer to the HW structure
4795  *  @vf_number: the VF index
4796  *
 *  returns SUCCESS if the VF has been reset (VFLRE/VFLREC bit set) or else ERR_MBX
4798  **/
ixgbe_check_for_rst_pf(struct ixgbe_hw * hw,uint16_t vf_number)4799 int32_t ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, uint16_t vf_number)
4800 {
4801 	uint32_t reg_offset = (vf_number < 32) ? 0 : 1;
4802 	uint32_t vf_shift = vf_number % 32;
4803 	uint32_t vflre = 0;
4804 	int32_t ret_val = IXGBE_ERR_MBX;
4805 
4806 	DEBUGFUNC("ixgbe_check_for_rst_pf");
4807 
4808 	switch (hw->mac.type) {
4809 	case ixgbe_mac_82599EB:
4810 		vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
4811 		break;
4812 	case ixgbe_mac_X550:
4813 	case ixgbe_mac_X550EM_x:
4814 	case ixgbe_mac_X550EM_a:
4815 	case ixgbe_mac_X540:
4816 		vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
4817 		break;
4818 	default:
4819 		break;
4820 	}
4821 
4822 	if (vflre & (1 << vf_shift)) {
4823 		ret_val = IXGBE_SUCCESS;
4824 		IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
4825 		hw->mbx.stats.rsts++;
4826 	}
4827 
4828 	return ret_val;
4829 }
4830 
4831 /**
4832  *  ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
4833  *  @hw: pointer to the HW structure
4834  *  @vf_number: the VF index
4835  *
4836  *  return SUCCESS if we obtained the mailbox lock
4837  **/
ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw * hw,uint16_t vf_number)4838 int32_t ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, uint16_t vf_number)
4839 {
4840 	int32_t ret_val = IXGBE_ERR_MBX;
4841 	uint32_t p2v_mailbox;
4842 
4843 	DEBUGFUNC("ixgbe_obtain_mbx_lock_pf");
4844 
4845 	/* Take ownership of the buffer */
4846 	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
4847 
4848 	/* reserve mailbox for vf use */
4849 	p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
4850 	if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
4851 		ret_val = IXGBE_SUCCESS;
4852 	else
4853 		ERROR_REPORT2(IXGBE_ERROR_POLLING,
4854 			   "Failed to obtain mailbox lock for VF%d", vf_number);
4855 
4856 
4857 	return ret_val;
4858 }
4859 
4860 /**
4861  *  ixgbe_write_mbx_pf - Places a message in the mailbox
4862  *  @hw: pointer to the HW structure
4863  *  @msg: The message buffer
4864  *  @size: Length of buffer
4865  *  @vf_number: the VF index
4866  *
4867  *  returns SUCCESS if it successfully copied message into the buffer
4868  **/
ixgbe_write_mbx_pf(struct ixgbe_hw * hw,uint32_t * msg,uint16_t size,uint16_t vf_number)4869 int32_t ixgbe_write_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
4870 			   uint16_t vf_number)
4871 {
4872 	int32_t ret_val;
4873 	uint16_t i;
4874 
4875 	DEBUGFUNC("ixgbe_write_mbx_pf");
4876 
4877 	/* lock the mailbox to prevent pf/vf race condition */
4878 	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
4879 	if (ret_val)
4880 		goto out_no_write;
4881 
4882 	/* flush msg and acks as we are overwriting the message buffer */
4883 	ixgbe_check_for_msg_pf(hw, vf_number);
4884 	ixgbe_check_for_ack_pf(hw, vf_number);
4885 
4886 	/* copy the caller specified message to the mailbox memory buffer */
4887 	for (i = 0; i < size; i++)
4888 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
4889 
4890 	/* Interrupt VF to tell it a message has been sent and release buffer*/
4891 	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
4892 
4893 	/* update stats */
4894 	hw->mbx.stats.msgs_tx++;
4895 
4896 out_no_write:
4897 	return ret_val;
4898 
4899 }
4900 
4901 /**
4902  *  ixgbe_read_mbx_pf - Read a message from the mailbox
4903  *  @hw: pointer to the HW structure
4904  *  @msg: The message buffer
4905  *  @size: Length of buffer
4906  *  @vf_number: the VF index
4907  *
4908  *  This function copies a message from the mailbox buffer to the caller's
4909  *  memory buffer.  The presumption is that the caller knows that there was
4910  *  a message due to a VF request so no polling for message is needed.
4911  **/
ixgbe_read_mbx_pf(struct ixgbe_hw * hw,uint32_t * msg,uint16_t size,uint16_t vf_number)4912 int32_t ixgbe_read_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
4913 			  uint16_t vf_number)
4914 {
4915 	int32_t ret_val;
4916 	uint16_t i;
4917 
4918 	DEBUGFUNC("ixgbe_read_mbx_pf");
4919 
4920 	/* lock the mailbox to prevent pf/vf race condition */
4921 	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
4922 	if (ret_val)
4923 		goto out_no_read;
4924 
4925 	/* copy the message to the mailbox memory buffer */
4926 	for (i = 0; i < size; i++)
4927 		msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
4928 
4929 	/* Acknowledge the message and release buffer */
4930 	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
4931 
4932 	/* update stats */
4933 	hw->mbx.stats.msgs_rx++;
4934 
4935 out_no_read:
4936 	return ret_val;
4937 }
4938 
4939 /**
4940  *  ixgbe_init_mbx_params_pf - set initial values for pf mailbox
4941  *  @hw: pointer to the HW structure
4942  *
4943  *  Initializes the hw->mbx struct to correct values for pf mailbox
4944  */
ixgbe_init_mbx_params_pf(struct ixgbe_hw * hw)4945 void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
4946 {
4947 	struct ixgbe_mbx_info *mbx = &hw->mbx;
4948 
4949 	if (hw->mac.type != ixgbe_mac_82599EB &&
4950 	    hw->mac.type != ixgbe_mac_X550 &&
4951 	    hw->mac.type != ixgbe_mac_X550EM_x &&
4952 	    hw->mac.type != ixgbe_mac_X550EM_a &&
4953 	    hw->mac.type != ixgbe_mac_X540)
4954 		return;
4955 
4956 	mbx->timeout = 0;
4957 	mbx->usec_delay = 0;
4958 
4959 	mbx->size = IXGBE_VFMAILBOX_SIZE;
4960 
4961 	mbx->ops.read = ixgbe_read_mbx_pf;
4962 	mbx->ops.write = ixgbe_write_mbx_pf;
4963 	mbx->ops.read_posted = ixgbe_read_posted_mbx;
4964 	mbx->ops.write_posted = ixgbe_write_posted_mbx;
4965 	mbx->ops.check_for_msg = ixgbe_check_for_msg_pf;
4966 	mbx->ops.check_for_ack = ixgbe_check_for_ack_pf;
4967 	mbx->ops.check_for_rst = ixgbe_check_for_rst_pf;
4968 
4969 	mbx->stats.msgs_tx = 0;
4970 	mbx->stats.msgs_rx = 0;
4971 	mbx->stats.reqs = 0;
4972 	mbx->stats.acks = 0;
4973 	mbx->stats.rsts = 0;
4974 }
4975