1 /* $OpenBSD: ixgbe.c,v 1.28 2024/10/27 04:44:41 yasuoka Exp $ */
2
3 /******************************************************************************
4 SPDX-License-Identifier: BSD-3-Clause
5
6 Copyright (c) 2001-2017, Intel Corporation
7 All rights reserved.
8
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
11
12 1. Redistributions of source code must retain the above copyright notice,
13 this list of conditions and the following disclaimer.
14
15 2. Redistributions in binary form must reproduce the above copyright
16 notice, this list of conditions and the following disclaimer in the
17 documentation and/or other materials provided with the distribution.
18
19 3. Neither the name of the Intel Corporation nor the names of its
20 contributors may be used to endorse or promote products derived from
21 this software without specific prior written permission.
22
23 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
27 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 POSSIBILITY OF SUCH DAMAGE.
34
35 ******************************************************************************/
36 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_common.c 326022 2017-11-20 19:36:21Z pfg $*/
37 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_mbx.c 326022 2017-11-20 19:36:21Z pfg $*/
38
39 #include <dev/pci/ixgbe.h>
40 #include <dev/pci/ixgbe_type.h>
41
42 #ifdef __sparc64__
43 #include <dev/ofw/openfirm.h>
44 #endif
45
46 void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw,
47 uint16_t link_status);
48
49 int32_t ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
50 int32_t ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
51 void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
52 int32_t ixgbe_ready_eeprom(struct ixgbe_hw *hw);
53 void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
54 void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, uint16_t data,
55 uint16_t count);
56 uint16_t ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, uint16_t count);
57 void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec);
58 void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec);
59 void ixgbe_release_eeprom(struct ixgbe_hw *hw);
60
61 int32_t ixgbe_mta_vector(struct ixgbe_hw *hw, uint8_t *mc_addr);
62 int32_t ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
63 int32_t ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
64 int32_t ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
65 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
66
67 int32_t prot_autoc_read_generic(struct ixgbe_hw *, bool *, uint32_t *);
68 int32_t prot_autoc_write_generic(struct ixgbe_hw *, uint32_t, bool);
69
70 /* MBX */
71 int32_t ixgbe_poll_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id);
72 int32_t ixgbe_poll_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id);
73 int32_t ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, uint32_t mask,
74 int32_t index);
75 int32_t ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, uint16_t vf_number);
76 int32_t ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, uint16_t vf_number);
77 int32_t ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, uint16_t vf_number);
78 int32_t ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, uint16_t vf_number);
79 int32_t ixgbe_write_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
80 uint16_t vf_number);
81 int32_t ixgbe_read_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
82 uint16_t vf_number);
83
#define IXGBE_EMPTY_PARAM

/*
 * Per-MAC-family value tables.  IXGBE_MVALS_INIT expands to the full
 * initializer list; its argument is the suffix appended to each entry's
 * macro name, selecting that family's values (empty suffix = base
 * layout).  Presumably these are register offsets/IDs that differ
 * between MAC generations — see IXGBE_MVALS_INIT in ixgbe_type.h.
 */
static const uint32_t ixgbe_mvals_base[IXGBE_MVALS_IDX_LIMIT] = {
	IXGBE_MVALS_INIT(IXGBE_EMPTY_PARAM)
};

static const uint32_t ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = {
	IXGBE_MVALS_INIT(_X540)
};

static const uint32_t ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
	IXGBE_MVALS_INIT(_X550)
};

static const uint32_t ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = {
	IXGBE_MVALS_INIT(_X550EM_x)
};

static const uint32_t ixgbe_mvals_X550EM_a[IXGBE_MVALS_IDX_LIMIT] = {
	IXGBE_MVALS_INIT(_X550EM_a)
};
105
106 /**
107 * ixgbe_init_ops_generic - Inits function ptrs
108 * @hw: pointer to the hardware structure
109 *
110 * Initialize the function pointers.
111 **/
int32_t ixgbe_init_ops_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	struct ixgbe_mac_info *mac = &hw->mac;
	uint32_t eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	DEBUGFUNC("ixgbe_init_ops_generic");

	/* EEPROM */
	eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
	if (eec & IXGBE_EEC_PRES)
		eeprom->ops.read = ixgbe_read_eerd_generic;
	else
		eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
	eeprom->ops.write = ixgbe_write_eeprom_generic;
	eeprom->ops.validate_checksum =
	    ixgbe_validate_eeprom_checksum_generic;
	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;

	/*
	 * MAC.  Entries set to NULL here are MAC-specific and are expected
	 * to be filled in by the per-family init routine before use.
	 */
	mac->ops.init_hw = ixgbe_init_hw_generic;
	mac->ops.reset_hw = NULL;
	mac->ops.start_hw = ixgbe_start_hw_generic;
	mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
	mac->ops.get_media_type = NULL;
	mac->ops.get_supported_physical_layer = NULL;
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
	mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
	mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
	mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
	mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
	mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
	mac->ops.prot_autoc_read = prot_autoc_read_generic;
	mac->ops.prot_autoc_write = prot_autoc_write_generic;

	/* LEDs */
	mac->ops.led_on = ixgbe_led_on_generic;
	mac->ops.led_off = ixgbe_led_off_generic;
	mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
	mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;

	/* RAR, Multicast, VLAN */
	mac->ops.set_rar = ixgbe_set_rar_generic;
	mac->ops.clear_rar = ixgbe_clear_rar_generic;
	mac->ops.insert_mac_addr = NULL;
	mac->ops.set_vmdq = NULL;
	mac->ops.clear_vmdq = NULL;
	mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
	mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
	mac->ops.enable_mc = ixgbe_enable_mc_generic;
	mac->ops.disable_mc = ixgbe_disable_mc_generic;
	mac->ops.clear_vfta = NULL;
	mac->ops.set_vfta = NULL;
	mac->ops.set_vlvf = NULL;
	mac->ops.init_uta_tables = NULL;
	mac->ops.enable_rx = ixgbe_enable_rx_generic;
	mac->ops.disable_rx = ixgbe_disable_rx_generic;

	/* Flow Control */
	mac->ops.fc_enable = ixgbe_fc_enable_generic;
	mac->ops.setup_fc = ixgbe_setup_fc_generic;
	mac->ops.fc_autoneg = ixgbe_fc_autoneg;

	/* Link (all MAC-specific; filled in by the per-family init) */
	mac->ops.get_link_capabilities = NULL;
	mac->ops.setup_link = NULL;
	mac->ops.check_link = NULL;
	mac->ops.dmac_config = NULL;
	mac->ops.dmac_update_tcs = NULL;
	mac->ops.dmac_config_tcs = NULL;

	return IXGBE_SUCCESS;
}
188
189 /**
190 * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
191 * of flow control
192 * @hw: pointer to hardware structure
193 *
194 * This function returns TRUE if the device supports flow control
195 * autonegotiation, and FALSE if it does not.
196 *
197 **/
ixgbe_device_supports_autoneg_fc(struct ixgbe_hw * hw)198 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
199 {
200 bool supported = FALSE;
201 ixgbe_link_speed speed;
202 bool link_up;
203
204 DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
205
206 switch (hw->phy.media_type) {
207 case ixgbe_media_type_fiber_fixed:
208 case ixgbe_media_type_fiber_qsfp:
209 case ixgbe_media_type_fiber:
210 /* flow control autoneg black list */
211 switch (hw->device_id) {
212 case IXGBE_DEV_ID_X550EM_A_SFP:
213 case IXGBE_DEV_ID_X550EM_A_SFP_N:
214 case IXGBE_DEV_ID_X550EM_A_QSFP:
215 case IXGBE_DEV_ID_X550EM_A_QSFP_N:
216 supported = FALSE;
217 break;
218 default:
219 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
220 /* if link is down, assume supported */
221 if (link_up)
222 supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
223 TRUE : FALSE;
224 else
225 supported = TRUE;
226 }
227
228 break;
229 case ixgbe_media_type_backplane:
230 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI)
231 supported = FALSE;
232 else
233 supported = TRUE;
234 break;
235 case ixgbe_media_type_copper:
236 /* only some copper devices support flow control autoneg */
237 switch (hw->device_id) {
238 case IXGBE_DEV_ID_82599_T3_LOM:
239 case IXGBE_DEV_ID_X540T:
240 case IXGBE_DEV_ID_X540T1:
241 case IXGBE_DEV_ID_X540_BYPASS:
242 case IXGBE_DEV_ID_X550T:
243 case IXGBE_DEV_ID_X550T1:
244 case IXGBE_DEV_ID_X550EM_X_10G_T:
245 case IXGBE_DEV_ID_X550EM_A_10G_T:
246 case IXGBE_DEV_ID_X550EM_A_1G_T:
247 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
248 supported = TRUE;
249 break;
250 default:
251 supported = FALSE;
252 }
253 default:
254 break;
255 }
256
257 if (!supported) {
258 ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
259 "Device %x does not support flow control autoneg",
260 hw->device_id);
261 }
262
263 return supported;
264 }
265
266 /**
267 * ixgbe_setup_fc_generic - Set up flow control
268 * @hw: pointer to hardware structure
269 *
270 * Called at init time to set up flow control.
271 **/
ixgbe_setup_fc_generic(struct ixgbe_hw * hw)272 int32_t ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
273 {
274 int32_t ret_val = IXGBE_SUCCESS;
275 uint32_t reg = 0, reg_bp = 0;
276 uint16_t reg_cu = 0;
277 bool locked = FALSE;
278
279 DEBUGFUNC("ixgbe_setup_fc");
280
281 /* Validate the requested mode */
282 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
283 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
284 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
285 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
286 goto out;
287 }
288
289 /*
290 * 10gig parts do not have a word in the EEPROM to determine the
291 * default flow control setting, so we explicitly set it to full.
292 */
293 if (hw->fc.requested_mode == ixgbe_fc_default)
294 hw->fc.requested_mode = ixgbe_fc_full;
295
296 /*
297 * Set up the 1G and 10G flow control advertisement registers so the
298 * HW will be able to do fc autoneg once the cable is plugged in. If
299 * we link at 10G, the 1G advertisement is harmless and vice versa.
300 */
301 switch (hw->phy.media_type) {
302 case ixgbe_media_type_backplane:
303 /* some MAC's need RMW protection on AUTOC */
304 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, ®_bp);
305 if (ret_val != IXGBE_SUCCESS)
306 goto out;
307
308 /* only backplane uses autoc so fall though */
309 case ixgbe_media_type_fiber_fixed:
310 case ixgbe_media_type_fiber_qsfp:
311 case ixgbe_media_type_fiber:
312 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
313
314 break;
315 case ixgbe_media_type_copper:
316 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
317 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu);
318 break;
319 default:
320 break;
321 }
322
323 /*
324 * The possible values of fc.requested_mode are:
325 * 0: Flow control is completely disabled
326 * 1: Rx flow control is enabled (we can receive pause frames,
327 * but not send pause frames).
328 * 2: Tx flow control is enabled (we can send pause frames but
329 * we do not support receiving pause frames).
330 * 3: Both Rx and Tx flow control (symmetric) are enabled.
331 * other: Invalid.
332 */
333 switch (hw->fc.requested_mode) {
334 case ixgbe_fc_none:
335 /* Flow control completely disabled by software override. */
336 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
337 if (hw->phy.media_type == ixgbe_media_type_backplane)
338 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
339 IXGBE_AUTOC_ASM_PAUSE);
340 else if (hw->phy.media_type == ixgbe_media_type_copper)
341 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
342 break;
343 case ixgbe_fc_tx_pause:
344 /*
345 * Tx Flow control is enabled, and Rx Flow control is
346 * disabled by software override.
347 */
348 reg |= IXGBE_PCS1GANA_ASM_PAUSE;
349 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
350 if (hw->phy.media_type == ixgbe_media_type_backplane) {
351 reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
352 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
353 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
354 reg_cu |= IXGBE_TAF_ASM_PAUSE;
355 reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
356 }
357 break;
358 case ixgbe_fc_rx_pause:
359 /*
360 * Rx Flow control is enabled and Tx Flow control is
361 * disabled by software override. Since there really
362 * isn't a way to advertise that we are capable of RX
363 * Pause ONLY, we will advertise that we support both
364 * symmetric and asymmetric Rx PAUSE, as such we fall
365 * through to the fc_full statement. Later, we will
366 * disable the adapter's ability to send PAUSE frames.
367 */
368 case ixgbe_fc_full:
369 /* Flow control (both Rx and Tx) is enabled by SW override. */
370 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
371 if (hw->phy.media_type == ixgbe_media_type_backplane)
372 reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
373 IXGBE_AUTOC_ASM_PAUSE;
374 else if (hw->phy.media_type == ixgbe_media_type_copper)
375 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
376 break;
377 default:
378 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
379 "Flow control param set incorrectly\n");
380 ret_val = IXGBE_ERR_CONFIG;
381 goto out;
382 break;
383 }
384
385 if (hw->mac.type < ixgbe_mac_X540) {
386 /*
387 * Enable auto-negotiation between the MAC & PHY;
388 * the MAC will advertise clause 37 flow control.
389 */
390 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
391 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
392
393 /* Disable AN timeout */
394 if (hw->fc.strict_ieee)
395 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
396
397 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
398 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
399 }
400
401 /*
402 * AUTOC restart handles negotiation of 1G and 10G on backplane
403 * and copper. There is no need to set the PCS1GCTL register.
404 *
405 */
406 if (hw->phy.media_type == ixgbe_media_type_backplane) {
407 reg_bp |= IXGBE_AUTOC_AN_RESTART;
408 ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
409 if (ret_val)
410 goto out;
411 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
412 (ixgbe_device_supports_autoneg_fc(hw))) {
413 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
414 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
415 }
416
417 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
418 out:
419 return ret_val;
420 }
421
422 /**
423 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
424 * @hw: pointer to hardware structure
425 *
426 * Starts the hardware by filling the bus info structure and media type, clears
427 * all on chip counters, initializes receive address registers, multicast
428 * table, VLAN filter table, calls routine to set up link and flow control
429 * settings, and leaves transmit and receive units disabled and uninitialized
430 **/
int32_t ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
	int32_t ret_val;
	uint32_t ctrl_ext;
	uint16_t device_caps;

	DEBUGFUNC("ixgbe_start_hw_generic");

	/* Set the media type */
	hw->phy.media_type = hw->mac.ops.get_media_type(hw);

	/* PHY ops initialization must be done in reset_hw() */

	/* Clear the VLAN filter table */
	hw->mac.ops.clear_vfta(hw);

	/* Clear statistics registers */
	hw->mac.ops.clear_hw_cntrs(hw);

	/* Set No Snoop Disable in CTRL_EXT; flush so it takes effect */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	/* Setup flow control; a failure here aborts the bring-up */
	if (hw->mac.ops.setup_fc) {
		ret_val = hw->mac.ops.setup_fc(hw);
		if (ret_val != IXGBE_SUCCESS) {
			DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val);
			return ret_val;
		}
	}

	/* Cache bit indicating need for crosstalk fix; the fix is
	 * needed unless device caps report NO_CROSSTALK_WR */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		hw->mac.ops.get_device_caps(hw, &device_caps);
		if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
			hw->need_crosstalk_fix = FALSE;
		else
			hw->need_crosstalk_fix = TRUE;
		break;
	default:
		hw->need_crosstalk_fix = FALSE;
		break;
	}

	/* Clear adapter stopped flag */
	hw->adapter_stopped = FALSE;

	return IXGBE_SUCCESS;
}
486
487 /**
488 * ixgbe_start_hw_gen2 - Init sequence for common device family
489 * @hw: pointer to hw structure
490 *
491 * Performs the init sequence common to the second generation
492 * of 10 GbE devices.
493 * Devices in the second generation:
494 * 82599
495 * X540
496 **/
ixgbe_start_hw_gen2(struct ixgbe_hw * hw)497 int32_t ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
498 {
499 uint32_t i;
500 uint32_t regval;
501
502 /* Clear the rate limiters */
503 for (i = 0; i < hw->mac.max_tx_queues; i++) {
504 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
505 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
506 }
507 IXGBE_WRITE_FLUSH(hw);
508
509 /* Disable relaxed ordering */
510 for (i = 0; i < hw->mac.max_tx_queues; i++) {
511 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
512 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
513 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
514 }
515
516 for (i = 0; i < hw->mac.max_rx_queues; i++) {
517 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
518 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
519 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
520 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
521 }
522
523 return IXGBE_SUCCESS;
524 }
525
526 /**
527 * ixgbe_init_hw_generic - Generic hardware initialization
528 * @hw: pointer to hardware structure
529 *
530 * Initialize the hardware by resetting the hardware, filling the bus info
531 * structure and media type, clears all on chip counters, initializes receive
532 * address registers, multicast table, VLAN filter table, calls routine to set
533 * up link and flow control settings, and leaves transmit and receive units
534 * disabled and uninitialized
535 **/
ixgbe_init_hw_generic(struct ixgbe_hw * hw)536 int32_t ixgbe_init_hw_generic(struct ixgbe_hw *hw)
537 {
538 int32_t status;
539
540 DEBUGFUNC("ixgbe_init_hw_generic");
541
542 /* Reset the hardware */
543 status = hw->mac.ops.reset_hw(hw);
544
545 if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) {
546 /* Start the HW */
547 status = hw->mac.ops.start_hw(hw);
548 }
549
550 if (status != IXGBE_SUCCESS)
551 DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);
552
553 return status;
554 }
555
556 /**
557 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
558 * @hw: pointer to hardware structure
559 *
560 * Clears all hardware statistics counters by reading them from the hardware
561 * Statistics counters are clear on read.
562 **/
int32_t ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	uint16_t i = 0;

	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");

	/* Receive error counters */
	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);
	/* Link-level XON/XOFF counters; 82599 and newer MACs moved the
	 * Rx counts to different register offsets */
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* Per-priority XON/XOFF counters */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	/* Rx size-bucket and good packet/octet counters */
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	/* RNBC only exists on 82598 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	/* Tx size-bucket counters */
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);
	/* Per-queue packet/byte counters */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	/* X540/X550 have additional PHY-side counters reached over MDIO
	 * (PCRC8ECL/H, LDPCECL/H); 'i' is reused as a throwaway
	 * destination for the read data. */
	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			ixgbe_identify_phy(hw);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
		    IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
		    IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
		    IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
		    IXGBE_MDIO_PCS_DEV_TYPE, &i);
	}

	return IXGBE_SUCCESS;
}
669
670 /**
671 * ixgbe_get_mac_addr_generic - Generic get MAC address
672 * @hw: pointer to hardware structure
673 * @mac_addr: Adapter MAC address
674 *
675 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
676 * A reset of the adapter must be performed prior to calling this function
677 * in order for the MAC address to have been loaded from the EEPROM into RAR0
678 **/
ixgbe_get_mac_addr_generic(struct ixgbe_hw * hw,uint8_t * mac_addr)679 int32_t ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, uint8_t *mac_addr)
680 {
681 uint32_t rar_high;
682 uint32_t rar_low;
683 uint16_t i;
684
685 DEBUGFUNC("ixgbe_get_mac_addr_generic");
686
687 #ifdef __sparc64__
688 struct ixgbe_osdep *os = hw->back;
689
690 if (OF_getprop(PCITAG_NODE(os->os_pa.pa_tag), "local-mac-address",
691 mac_addr, ETHER_ADDR_LEN) == ETHER_ADDR_LEN)
692 return IXGBE_SUCCESS;
693 #endif
694
695 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
696 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
697
698 for (i = 0; i < 4; i++)
699 mac_addr[i] = (uint8_t)(rar_low >> (i*8));
700
701 for (i = 0; i < 2; i++)
702 mac_addr[i+4] = (uint8_t)(rar_high >> (i*8));
703
704 return IXGBE_SUCCESS;
705 }
706
707 /**
708 * ixgbe_set_pci_config_data_generic - Generic store PCI bus info
709 * @hw: pointer to hardware structure
710 * @link_status: the link status returned by the PCI config space
711 *
712 * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
713 **/
void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw,
    uint16_t link_status)
{
	/* Anything reaching this generic path is a PCI Express device
	 * unless the bus type was determined earlier. */
	if (hw->bus.type == ixgbe_bus_type_unknown)
		hw->bus.type = ixgbe_bus_type_pci_express;

	/* Decode the negotiated link speed from the status word. */
	switch (link_status & IXGBE_PCI_LINK_SPEED) {
	case IXGBE_PCI_LINK_SPEED_2500:
		hw->bus.speed = ixgbe_bus_speed_2500;
		break;
	case IXGBE_PCI_LINK_SPEED_5000:
		hw->bus.speed = ixgbe_bus_speed_5000;
		break;
	case IXGBE_PCI_LINK_SPEED_8000:
		hw->bus.speed = ixgbe_bus_speed_8000;
		break;
	default:
		hw->bus.speed = ixgbe_bus_speed_unknown;
		break;
	}

	/* Decode the negotiated lane count. */
	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
	case IXGBE_PCI_LINK_WIDTH_1:
		hw->bus.width = ixgbe_bus_width_pcie_x1;
		break;
	case IXGBE_PCI_LINK_WIDTH_2:
		hw->bus.width = ixgbe_bus_width_pcie_x2;
		break;
	case IXGBE_PCI_LINK_WIDTH_4:
		hw->bus.width = ixgbe_bus_width_pcie_x4;
		break;
	case IXGBE_PCI_LINK_WIDTH_8:
		hw->bus.width = ixgbe_bus_width_pcie_x8;
		break;
	default:
		hw->bus.width = ixgbe_bus_width_unknown;
		break;
	}

	/* Finally record which LAN function this port is. */
	hw->mac.ops.set_lan_id(hw);
}
757
758 /**
759 * ixgbe_get_bus_info_generic - Generic set PCI bus info
760 * @hw: pointer to hardware structure
761 *
762 * Gets the PCI bus info (speed, width, type) then calls helper function to
763 * store this data within the ixgbe_hw structure.
764 **/
ixgbe_get_bus_info_generic(struct ixgbe_hw * hw)765 int32_t ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
766 {
767 uint16_t link_status;
768
769 DEBUGFUNC("ixgbe_get_bus_info_generic");
770
771 /* Get the negotiated link width and speed from PCI config space */
772 link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
773
774 ixgbe_set_pci_config_data_generic(hw, link_status);
775
776 return IXGBE_SUCCESS;
777 }
778
779 /**
780 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
781 * @hw: pointer to the HW structure
782 *
783 * Determines the LAN function id by reading memory-mapped registers and swaps
784 * the port value if requested, and set MAC instance for devices that share
785 * CS4227.
786 **/
ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw * hw)787 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
788 {
789 struct ixgbe_bus_info *bus = &hw->bus;
790 uint32_t reg;
791 uint16_t ee_ctrl_4;
792
793 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
794
795 reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
796 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
797 bus->lan_id = bus->func;
798
799 /* check for a port swap */
800 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
801 if (reg & IXGBE_FACTPS_LFS)
802 bus->func ^= 0x1;
803
804 /* Get MAC instance from EEPROM for configuring CS4227 */
805 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
806 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
807 bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
808 IXGBE_EE_CTRL_4_INST_ID_SHIFT;
809 }
810 }
811
812 /**
813 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
814 * @hw: pointer to hardware structure
815 *
816 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
817 * disables transmit and receive units. The adapter_stopped flag is used by
818 * the shared code and drivers to determine if the adapter is in a stopped
819 * state and should not touch the hardware.
820 **/
int32_t ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
	uint32_t reg_val;
	uint16_t i;

	DEBUGFUNC("ixgbe_stop_adapter_generic");

	/*
	 * Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = TRUE;

	/* Disable the receive unit */
	ixgbe_disable_rx(hw);

	/* Clear interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts, flush previous writes */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Disable the transmit unit.  Each queue must be disabled
	 * individually; SWFLSH requests a descriptor flush. */
	for (i = 0; i < hw->mac.max_tx_queues; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);

	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		reg_val &= ~IXGBE_RXDCTL_ENABLE;
		reg_val |= IXGBE_RXDCTL_SWFLSH;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
	}

	/* flush all queues disables, then give the hardware time to
	 * complete the flush */
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(2);

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests
	 */
	return ixgbe_disable_pcie_master(hw);
}
865
866 /**
867 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
868 * @hw: pointer to hardware structure
869 * @index: led number to turn on
870 **/
ixgbe_led_on_generic(struct ixgbe_hw * hw,uint32_t index)871 int32_t ixgbe_led_on_generic(struct ixgbe_hw *hw, uint32_t index)
872 {
873 uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
874
875 DEBUGFUNC("ixgbe_led_on_generic");
876
877 if (index > 3)
878 return IXGBE_ERR_PARAM;
879
880 /* To turn on the LED, set mode to ON. */
881 led_reg &= ~IXGBE_LED_MODE_MASK(index);
882 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
883 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
884 IXGBE_WRITE_FLUSH(hw);
885
886 return IXGBE_SUCCESS;
887 }
888
889 /**
890 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
891 * @hw: pointer to hardware structure
892 * @index: led number to turn off
893 **/
ixgbe_led_off_generic(struct ixgbe_hw * hw,uint32_t index)894 int32_t ixgbe_led_off_generic(struct ixgbe_hw *hw, uint32_t index)
895 {
896 uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
897
898 DEBUGFUNC("ixgbe_led_off_generic");
899
900 if (index > 3)
901 return IXGBE_ERR_PARAM;
902
903 /* To turn off the LED, set mode to OFF. */
904 led_reg &= ~IXGBE_LED_MODE_MASK(index);
905 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
906 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
907 IXGBE_WRITE_FLUSH(hw);
908
909 return IXGBE_SUCCESS;
910 }
911
912 /**
913 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
914 * @hw: pointer to hardware structure
915 *
916 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
917 * ixgbe_hw struct in order to set up EEPROM access.
918 **/
int32_t ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	uint32_t eec;
	uint16_t eeprom_size;

	DEBUGFUNC("ixgbe_init_eeprom_params_generic");

	/* Only initialize once; subsequent calls are no-ops. */
	if (eeprom->type == ixgbe_eeprom_uninitialized) {
		eeprom->type = ixgbe_eeprom_none;
		/* Set default semaphore delay to 10ms which is a well
		 * tested value */
		eeprom->semaphore_delay = 10;
		/* Clear EEPROM page size, it will be initialized as needed */
		eeprom->word_page_size = 0;

		/*
		 * Check for EEPROM present first.
		 * If not present leave as none
		 */
		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
		if (eec & IXGBE_EEC_PRES) {
			eeprom->type = ixgbe_eeprom_spi;

			/*
			 * SPI EEPROM is assumed here. This code would need to
			 * change if a future EEPROM is not SPI.
			 */
			/* Word count is 2^(size_field + base_shift) */
			eeprom_size = (uint16_t)((eec & IXGBE_EEC_SIZE) >>
			    IXGBE_EEC_SIZE_SHIFT);
			eeprom->word_size = 1 << (eeprom_size +
			    IXGBE_EEPROM_WORD_SIZE_SHIFT);
		}

		/* The EEC address-size bit selects 16- vs 8-bit
		 * addressing for the bit-bang access path. */
		if (eec & IXGBE_EEC_ADDR_SIZE)
			eeprom->address_bits = 16;
		else
			eeprom->address_bits = 8;
		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
		    "%d\n", eeprom->type, eeprom->word_size,
		    eeprom->address_bits);
	}

	return IXGBE_SUCCESS;
}
964
965 /**
966 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
967 * @hw: pointer to hardware structure
968 * @offset: offset within the EEPROM to be written to
969 * @words: number of word(s)
970 * @data: 16 bit word(s) to be written to the EEPROM
971 *
972 * If ixgbe_eeprom_update_checksum is not called after this function, the
973 * EEPROM will most likely contain an invalid checksum.
974 **/
static int32_t ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, uint16_t offset,
					     uint16_t words, uint16_t *data)
{
	int32_t status;
	uint16_t word;
	uint16_t page_size;
	uint16_t i;
	uint8_t write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for writing */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			/* Part stayed busy: give the interface back, fail. */
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		/* NOTE: 'i' is advanced both by this for loop and by the
		 * inner burst loop below, so each outer iteration starts a
		 * fresh write command at the first word not yet sent. */
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);

			/* Send the WRITE ENABLE command (8 bit opcode ) */
			ixgbe_shift_out_eeprom_bits(hw,
						    IXGBE_EEPROM_WREN_OPCODE_SPI,
						    IXGBE_EEPROM_OPCODE_BITS);

			ixgbe_standby_eeprom(hw);

			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the Write command (8-bit opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			/* Address is in bytes, words are 2 bytes wide. */
			ixgbe_shift_out_eeprom_bits(hw, (uint16_t)((offset + i) * 2),
						    hw->eeprom.address_bits);

			page_size = hw->eeprom.word_page_size;

			/* Send the data in burst via SPI*/
			do {
				/* Words go out big-endian; swap the bytes. */
				word = data[i];
				word = (word >> 8) | (word << 8);
				ixgbe_shift_out_eeprom_bits(hw, word, 16);

				/* Unknown page size: one word per command. */
				if (page_size == 0)
					break;

				/* do not wrap around page */
				if (((offset + i) & (page_size - 1)) ==
				    (page_size - 1))
					break;
			} while (++i < words);

			ixgbe_standby_eeprom(hw);
			/* Let the EEPROM's internal write cycle finish. */
			msec_delay(10);
		}
		/* Done with writing - release the EEPROM */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
1047
1048 /**
1049 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1050 * @hw: pointer to hardware structure
1051 * @offset: offset within the EEPROM to be written to
1052 * @data: 16 bit word to be written to the EEPROM
1053 *
1054 * If ixgbe_eeprom_update_checksum is not called after this function, the
1055 * EEPROM will most likely contain an invalid checksum.
1056 **/
ixgbe_write_eeprom_generic(struct ixgbe_hw * hw,uint16_t offset,uint16_t data)1057 int32_t ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, uint16_t offset, uint16_t data)
1058 {
1059 int32_t status;
1060
1061 DEBUGFUNC("ixgbe_write_eeprom_generic");
1062
1063 hw->eeprom.ops.init_params(hw);
1064
1065 if (offset >= hw->eeprom.word_size) {
1066 status = IXGBE_ERR_EEPROM;
1067 goto out;
1068 }
1069
1070 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1071
1072 out:
1073 return status;
1074 }
1075
1076 /**
1077 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1078 * @hw: pointer to hardware structure
1079 * @offset: offset within the EEPROM to be read
1080 * @words: number of word(s)
1081 * @data: read 16 bit word(s) from EEPROM
1082 *
1083 * Reads 16 bit word(s) from EEPROM through bit-bang method
1084 **/
ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw * hw,uint16_t offset,uint16_t words,uint16_t * data)1085 static int32_t ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, uint16_t offset,
1086 uint16_t words, uint16_t *data)
1087 {
1088 int32_t status;
1089 uint16_t word_in;
1090 uint8_t read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1091 uint16_t i;
1092
1093 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
1094
1095 /* Prepare the EEPROM for reading */
1096 status = ixgbe_acquire_eeprom(hw);
1097
1098 if (status == IXGBE_SUCCESS) {
1099 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1100 ixgbe_release_eeprom(hw);
1101 status = IXGBE_ERR_EEPROM;
1102 }
1103 }
1104
1105 if (status == IXGBE_SUCCESS) {
1106 for (i = 0; i < words; i++) {
1107 ixgbe_standby_eeprom(hw);
1108 /*
1109 * Some SPI eeproms use the 8th address bit embedded
1110 * in the opcode
1111 */
1112 if ((hw->eeprom.address_bits == 8) &&
1113 ((offset + i) >= 128))
1114 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1115
1116 /* Send the READ command (opcode + addr) */
1117 ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1118 IXGBE_EEPROM_OPCODE_BITS);
1119 ixgbe_shift_out_eeprom_bits(hw, (uint16_t)((offset + i) * 2),
1120 hw->eeprom.address_bits);
1121
1122 /* Read the data. */
1123 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
1124 data[i] = (word_in >> 8) | (word_in << 8);
1125 }
1126
1127 /* End this read operation */
1128 ixgbe_release_eeprom(hw);
1129 }
1130
1131 return status;
1132 }
1133
1134 /**
1135 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1136 * @hw: pointer to hardware structure
1137 * @offset: offset within the EEPROM to be read
1138 * @data: read 16 bit value from EEPROM
1139 *
1140 * Reads 16 bit value from EEPROM through bit-bang method
1141 **/
ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw * hw,uint16_t offset,uint16_t * data)1142 int32_t ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, uint16_t offset,
1143 uint16_t *data)
1144 {
1145 int32_t status;
1146
1147 DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1148
1149 hw->eeprom.ops.init_params(hw);
1150
1151 if (offset >= hw->eeprom.word_size) {
1152 status = IXGBE_ERR_EEPROM;
1153 goto out;
1154 }
1155
1156 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1157
1158 out:
1159 return status;
1160 }
1161
1162 /**
1163 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1164 * @hw: pointer to hardware structure
1165 * @offset: offset of word in the EEPROM to read
1166 * @words: number of word(s)
1167 * @data: 16 bit word(s) from the EEPROM
1168 *
1169 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
1170 **/
ixgbe_read_eerd_buffer_generic(struct ixgbe_hw * hw,uint16_t offset,uint16_t words,uint16_t * data)1171 int32_t ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, uint16_t offset,
1172 uint16_t words, uint16_t *data)
1173 {
1174 uint32_t eerd;
1175 int32_t status = IXGBE_SUCCESS;
1176 uint32_t i;
1177
1178 DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1179
1180 hw->eeprom.ops.init_params(hw);
1181
1182 if (words == 0) {
1183 status = IXGBE_ERR_INVALID_ARGUMENT;
1184 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1185 goto out;
1186 }
1187
1188 if (offset >= hw->eeprom.word_size) {
1189 status = IXGBE_ERR_EEPROM;
1190 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1191 goto out;
1192 }
1193
1194 for (i = 0; i < words; i++) {
1195 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1196 IXGBE_EEPROM_RW_REG_START;
1197
1198 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1199 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1200
1201 if (status == IXGBE_SUCCESS) {
1202 data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1203 IXGBE_EEPROM_RW_REG_DATA);
1204 } else {
1205 DEBUGOUT("Eeprom read timed out\n");
1206 goto out;
1207 }
1208 }
1209 out:
1210 return status;
1211 }
1212
1213 /**
1214 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
1215 * @hw: pointer to hardware structure
1216 * @offset: offset of word in the EEPROM to read
1217 * @data: word read from the EEPROM
1218 *
1219 * Reads a 16 bit word from the EEPROM using the EERD register.
1220 **/
int32_t ixgbe_read_eerd_generic(struct ixgbe_hw *hw, uint16_t offset, uint16_t *data)
{
	/* Single-word convenience wrapper around the buffer variant. */
	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
}
1225
1226 /**
1227 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1228 * @hw: pointer to hardware structure
1229 * @offset: offset of word in the EEPROM to write
1230 * @words: number of word(s)
1231 * @data: word(s) write to the EEPROM
1232 *
1233 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
1234 **/
ixgbe_write_eewr_buffer_generic(struct ixgbe_hw * hw,uint16_t offset,uint16_t words,uint16_t * data)1235 int32_t ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, uint16_t offset,
1236 uint16_t words, uint16_t *data)
1237 {
1238 uint32_t eewr;
1239 int32_t status = IXGBE_SUCCESS;
1240 uint16_t i;
1241
1242 DEBUGFUNC("ixgbe_write_eewr_generic");
1243
1244 hw->eeprom.ops.init_params(hw);
1245
1246 if (words == 0) {
1247 status = IXGBE_ERR_INVALID_ARGUMENT;
1248 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1249 goto out;
1250 }
1251
1252 if (offset >= hw->eeprom.word_size) {
1253 status = IXGBE_ERR_EEPROM;
1254 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1255 goto out;
1256 }
1257
1258 for (i = 0; i < words; i++) {
1259 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1260 (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1261 IXGBE_EEPROM_RW_REG_START;
1262
1263 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1264 if (status != IXGBE_SUCCESS) {
1265 DEBUGOUT("Eeprom write EEWR timed out\n");
1266 goto out;
1267 }
1268
1269 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1270
1271 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1272 if (status != IXGBE_SUCCESS) {
1273 DEBUGOUT("Eeprom write EEWR timed out\n");
1274 goto out;
1275 }
1276 }
1277
1278 out:
1279 return status;
1280 }
1281
1282 /**
1283 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1284 * @hw: pointer to hardware structure
1285 * @offset: offset of word in the EEPROM to write
1286 * @data: word write to the EEPROM
1287 *
1288 * Write a 16 bit word to the EEPROM using the EEWR register.
1289 **/
int32_t ixgbe_write_eewr_generic(struct ixgbe_hw *hw, uint16_t offset, uint16_t data)
{
	/* Single-word convenience wrapper around the buffer variant. */
	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
}
1294
1295 /**
1296 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1297 * @hw: pointer to hardware structure
1298 * @ee_reg: EEPROM flag for polling
1299 *
1300 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1301 * read or write is done respectively.
1302 **/
ixgbe_poll_eerd_eewr_done(struct ixgbe_hw * hw,uint32_t ee_reg)1303 int32_t ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, uint32_t ee_reg)
1304 {
1305 uint32_t i;
1306 uint32_t reg;
1307 int32_t status = IXGBE_ERR_EEPROM;
1308
1309 DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1310
1311 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1312 if (ee_reg == IXGBE_NVM_POLL_READ)
1313 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1314 else
1315 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1316
1317 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1318 status = IXGBE_SUCCESS;
1319 break;
1320 }
1321 usec_delay(5);
1322 }
1323
1324 if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1325 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1326 "EEPROM read/write done polling timed out");
1327
1328 return status;
1329 }
1330
1331 /**
1332 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1333 * @hw: pointer to hardware structure
1334 *
1335 * Prepares EEPROM for access using bit-bang method. This function should
1336 * be called before issuing a command to the EEPROM.
1337 **/
int32_t ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	int32_t status = IXGBE_SUCCESS;
	uint32_t eec;
	uint32_t i;

	DEBUGFUNC("ixgbe_acquire_eeprom");

	/* Step 1: take the SW/FW synchronization semaphore for the
	 * EEPROM so firmware does not touch it concurrently. */
	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
	    != IXGBE_SUCCESS)
		status = IXGBE_ERR_SWFW_SYNC;

	if (status == IXGBE_SUCCESS) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

		/* Request EEPROM Access */
		eec |= IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);

		/* Step 2: poll for the hardware grant bit. */
		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
			eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
			if (eec & IXGBE_EEC_GNT)
				break;
			usec_delay(5);
		}

		/* Release if grant not acquired */
		if (!(eec & IXGBE_EEC_GNT)) {
			eec &= ~IXGBE_EEC_REQ;
			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
			DEBUGOUT("Could not acquire EEPROM grant\n");

			/* Also give back the semaphore taken above. */
			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
			status = IXGBE_ERR_EEPROM;
		}

		/* Setup EEPROM for Read/Write */
		if (status == IXGBE_SUCCESS) {
			/* Clear CS and SK */
			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
			IXGBE_WRITE_FLUSH(hw);
			usec_delay(1);
		}
	}
	return status;
}
1385
1386 /**
1387 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
1388 * @hw: pointer to hardware structure
1389 *
1390 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1391 **/
int32_t ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	int32_t status = IXGBE_ERR_EEPROM;
	/* 2000 attempts * 50 usec delay gives roughly a 100 ms budget
	 * for each of the two acquisition stages below. */
	uint32_t timeout = 2000;
	uint32_t i;
	uint32_t swsm;

	DEBUGFUNC("ixgbe_get_eeprom_semaphore");


	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = IXGBE_SUCCESS;
			break;
		}
		usec_delay(50);
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
			 "not granted.\n");
		/*
		 * this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress
		 */
		ixgbe_release_eeprom_semaphore(hw);

		usec_delay(50);
		/*
		 * one last try
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
		if (!(swsm & IXGBE_SWSM_SMBI))
			status = IXGBE_SUCCESS;
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));

			/* Set the SW EEPROM semaphore bit to request access */
			swsm |= IXGBE_SWSM_SWESMBI;
			IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);

			/*
			 * If we set the bit successfully then we got the
			 * semaphore.  (Read back to see whether the write
			 * stuck; firmware owns the bit otherwise.)
			 */
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
			if (swsm & IXGBE_SWSM_SWESMBI)
				break;

			usec_delay(50);
		}

		/*
		 * Release semaphores and return error if SW EEPROM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			ERROR_REPORT1(IXGBE_ERROR_POLLING,
			    "SWESMBI Software EEPROM semaphore not granted.\n");
			ixgbe_release_eeprom_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	} else {
		ERROR_REPORT1(IXGBE_ERROR_POLLING,
			     "Software semaphore SMBI between device drivers "
			     "not granted.\n");
	}

	return status;
}
1476
1477 /**
1478 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
1479 * @hw: pointer to hardware structure
1480 *
1481 * This function clears hardware semaphore bits.
1482 **/
ixgbe_release_eeprom_semaphore(struct ixgbe_hw * hw)1483 void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1484 {
1485 uint32_t swsm;
1486
1487 DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1488
1489 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1490
1491 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1492 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1493 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1494 IXGBE_WRITE_FLUSH(hw);
1495 }
1496
1497 /**
1498 * ixgbe_ready_eeprom - Polls for EEPROM ready
1499 * @hw: pointer to hardware structure
1500 **/
ixgbe_ready_eeprom(struct ixgbe_hw * hw)1501 int32_t ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1502 {
1503 int32_t status = IXGBE_SUCCESS;
1504 uint16_t i;
1505 uint8_t spi_stat_reg;
1506
1507 DEBUGFUNC("ixgbe_ready_eeprom");
1508
1509 /*
1510 * Read "Status Register" repeatedly until the LSB is cleared. The
1511 * EEPROM will signal that the command has been completed by clearing
1512 * bit 0 of the internal status register. If it's not cleared within
1513 * 5 milliseconds, then error out.
1514 */
1515 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1516 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1517 IXGBE_EEPROM_OPCODE_BITS);
1518 spi_stat_reg = (uint8_t)ixgbe_shift_in_eeprom_bits(hw, 8);
1519 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1520 break;
1521
1522 usec_delay(5);
1523 ixgbe_standby_eeprom(hw);
1524 }
1525
1526 /*
1527 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1528 * devices (and only 0-5mSec on 5V devices)
1529 */
1530 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1531 DEBUGOUT("SPI EEPROM Status error\n");
1532 status = IXGBE_ERR_EEPROM;
1533 }
1534
1535 return status;
1536 }
1537
1538 /**
1539 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1540 * @hw: pointer to hardware structure
1541 **/
ixgbe_standby_eeprom(struct ixgbe_hw * hw)1542 void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1543 {
1544 uint32_t eec;
1545
1546 DEBUGFUNC("ixgbe_standby_eeprom");
1547
1548 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1549
1550 /* Toggle CS to flush commands */
1551 eec |= IXGBE_EEC_CS;
1552 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1553 IXGBE_WRITE_FLUSH(hw);
1554 usec_delay(1);
1555 eec &= ~IXGBE_EEC_CS;
1556 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1557 IXGBE_WRITE_FLUSH(hw);
1558 usec_delay(1);
1559 }
1560
1561 /**
1562 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1563 * @hw: pointer to hardware structure
1564 * @data: data to send to the EEPROM
1565 * @count: number of bits to shift out
1566 **/
void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, uint16_t data,
				 uint16_t count)
{
	uint32_t eec;
	uint32_t mask;
	uint32_t i;

	DEBUGFUNC("ixgbe_shift_out_eeprom_bits");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	/*
	 * Mask is used to shift "count" bits of "data" out to the EEPROM
	 * one bit at a time.  Determine the starting bit based on count.
	 * Bits go out most-significant first (mask starts at bit count-1
	 * and shifts right each iteration).
	 */
	mask = 0x01 << (count - 1);

	for (i = 0; i < count; i++) {
		/*
		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
		 * "1", and then raising and then lowering the clock (the SK
		 * bit controls the clock input to the EEPROM).  A "0" is
		 * shifted out to the EEPROM by setting "DI" to "0" and then
		 * raising and then lowering the clock.
		 */
		if (data & mask)
			eec |= IXGBE_EEC_DI;
		else
			eec &= ~IXGBE_EEC_DI;

		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
		IXGBE_WRITE_FLUSH(hw);

		usec_delay(1);

		/* One full clock pulse latches the bit into the EEPROM. */
		ixgbe_raise_eeprom_clk(hw, &eec);
		ixgbe_lower_eeprom_clk(hw, &eec);

		/*
		 * Shift mask to signify next bit of data to shift in to the
		 * EEPROM
		 */
		mask = mask >> 1;
	}

	/* We leave the "DI" bit set to "0" when we leave this routine. */
	eec &= ~IXGBE_EEC_DI;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
}
1617
1618 /**
1619 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
1620 * @hw: pointer to hardware structure
1621 * @count: number of bits to shift
1622 **/
uint16_t ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, uint16_t count)
{
	uint32_t eec;
	uint32_t i;
	uint16_t data = 0;

	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");

	/*
	 * In order to read a register from the EEPROM, we need to shift
	 * 'count' bits in from the EEPROM.  Bits are "shifted in" by raising
	 * the clock input to the EEPROM (setting the SK bit), and then reading
	 * the value of the "DO" bit.  During this "shifting in" process the
	 * "DI" bit should always be clear.
	 */
	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);

	/* Bits arrive most-significant first: each iteration shifts the
	 * accumulator left and ORs in the newly sampled DO bit. */
	for (i = 0; i < count; i++) {
		data = data << 1;
		ixgbe_raise_eeprom_clk(hw, &eec);

		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

		/* Keep DI low while clocking bits in. */
		eec &= ~(IXGBE_EEC_DI);
		if (eec & IXGBE_EEC_DO)
			data |= 1;

		ixgbe_lower_eeprom_clk(hw, &eec);
	}

	return data;
}
1657
1658 /**
1659 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
1660 * @hw: pointer to hardware structure
1661 * @eec: EEC register's current value
1662 **/
void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec)
{
	DEBUGFUNC("ixgbe_raise_eeprom_clk");

	/* Drive SK high, push it to hardware, and give the EEPROM a
	 * microsecond to see the rising edge. */
	*eec |= IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(1);
}
1676
1677 /**
1678 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
1679 * @hw: pointer to hardware structure
1680 * @eec: EEC's current value
1681 **/
void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec)
{
	DEBUGFUNC("ixgbe_lower_eeprom_clk");

	/* Drive SK low, push it to hardware, and give the EEPROM a
	 * microsecond to see the falling edge. */
	*eec &= ~IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(1);
}
1695
1696 /**
1697 * ixgbe_release_eeprom - Release EEPROM, release semaphores
1698 * @hw: pointer to hardware structure
1699 **/
void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
	uint32_t eec;

	DEBUGFUNC("ixgbe_release_eeprom");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	/* Park the serial bus before dropping ownership. */
	eec |= IXGBE_EEC_CS;  /* Pull CS high */
	eec &= ~IXGBE_EEC_SK; /* Lower SCK */

	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);

	usec_delay(1);

	/* Stop requesting EEPROM access */
	eec &= ~IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);

	/* Give back the SW/FW semaphore taken in ixgbe_acquire_eeprom(). */
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	/* Delay before attempt to obtain semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
}
1725
1726 /**
1727 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
1728 * @hw: pointer to hardware structure
1729 *
1730 * Returns a negative error code on error, or the 16-bit checksum
1731 **/
ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw * hw)1732 int32_t ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
1733 {
1734 uint16_t i;
1735 uint16_t j;
1736 uint16_t checksum = 0;
1737 uint16_t length = 0;
1738 uint16_t pointer = 0;
1739 uint16_t word = 0;
1740
1741 DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
1742
1743 /* Include 0x0-0x3F in the checksum */
1744 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
1745 if (hw->eeprom.ops.read(hw, i, &word)) {
1746 DEBUGOUT("EEPROM read failed\n");
1747 return IXGBE_ERR_EEPROM;
1748 }
1749 checksum += word;
1750 }
1751
1752 /* Include all data from pointers except for the fw pointer */
1753 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
1754 if (hw->eeprom.ops.read(hw, i, &pointer)) {
1755 DEBUGOUT("EEPROM read failed\n");
1756 return IXGBE_ERR_EEPROM;
1757 }
1758
1759 /* If the pointer seems invalid */
1760 if (pointer == 0xFFFF || pointer == 0)
1761 continue;
1762
1763 if (hw->eeprom.ops.read(hw, pointer, &length)) {
1764 DEBUGOUT("EEPROM read failed\n");
1765 return IXGBE_ERR_EEPROM;
1766 }
1767
1768 if (length == 0xFFFF || length == 0)
1769 continue;
1770
1771 for (j = pointer + 1; j <= pointer + length; j++) {
1772 if (hw->eeprom.ops.read(hw, j, &word)) {
1773 DEBUGOUT("EEPROM read failed\n");
1774 return IXGBE_ERR_EEPROM;
1775 }
1776 checksum += word;
1777 }
1778 }
1779
1780 checksum = (uint16_t)IXGBE_EEPROM_SUM - checksum;
1781
1782 return (int32_t)checksum;
1783 }
1784
1785 /**
1786 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
1787 * @hw: pointer to hardware structure
1788 * @checksum_val: calculated checksum
1789 *
1790 * Performs checksum calculation and validates the EEPROM checksum. If the
1791 * caller does not need checksum_val, the value can be NULL.
1792 **/
ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw * hw,uint16_t * checksum_val)1793 int32_t ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
1794 uint16_t *checksum_val)
1795 {
1796 int32_t status;
1797 uint16_t checksum;
1798 uint16_t read_checksum = 0;
1799
1800 DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
1801
1802 /* Read the first word from the EEPROM. If this times out or fails, do
1803 * not continue or we could be in for a very long wait while every
1804 * EEPROM read fails
1805 */
1806 status = hw->eeprom.ops.read(hw, 0, &checksum);
1807 if (status) {
1808 DEBUGOUT("EEPROM read failed\n");
1809 return status;
1810 }
1811
1812 status = hw->eeprom.ops.calc_checksum(hw);
1813 if (status < 0)
1814 return status;
1815
1816 checksum = (uint16_t)(status & 0xffff);
1817
1818 status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
1819 if (status) {
1820 DEBUGOUT("EEPROM read failed\n");
1821 return status;
1822 }
1823
1824 /* Verify read checksum from EEPROM is the same as
1825 * calculated checksum
1826 */
1827 if (read_checksum != checksum)
1828 status = IXGBE_ERR_EEPROM_CHECKSUM;
1829
1830 /* If the user cares, return the calculated checksum */
1831 if (checksum_val)
1832 *checksum_val = checksum;
1833
1834 return status;
1835 }
1836
1837 /**
1838 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
1839 * @hw: pointer to hardware structure
1840 **/
ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw * hw)1841 int32_t ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1842 {
1843 int32_t status;
1844 uint16_t checksum;
1845
1846 DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
1847
1848 /* Read the first word from the EEPROM. If this times out or fails, do
1849 * not continue or we could be in for a very long wait while every
1850 * EEPROM read fails
1851 */
1852 status = hw->eeprom.ops.read(hw, 0, &checksum);
1853 if (status) {
1854 DEBUGOUT("EEPROM read failed\n");
1855 return status;
1856 }
1857
1858 status = hw->eeprom.ops.calc_checksum(hw);
1859 if (status < 0)
1860 return status;
1861
1862 checksum = (uint16_t)(status & 0xffff);
1863
1864 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
1865
1866 return status;
1867 }
1868
1869 /**
1870 * ixgbe_validate_mac_addr - Validate MAC address
1871 * @mac_addr: pointer to MAC address.
1872 *
1873 * Tests a MAC address to ensure it is a valid Individual Address
1874 **/
ixgbe_validate_mac_addr(uint8_t * mac_addr)1875 int32_t ixgbe_validate_mac_addr(uint8_t *mac_addr)
1876 {
1877 int32_t status = IXGBE_SUCCESS;
1878
1879 DEBUGFUNC("ixgbe_validate_mac_addr");
1880
1881 /* Make sure it is not a multicast address */
1882 if (IXGBE_IS_MULTICAST(mac_addr)) {
1883 DEBUGOUT("MAC address is multicast\n");
1884 status = IXGBE_ERR_INVALID_MAC_ADDR;
1885 /* Not a broadcast address */
1886 } else if (IXGBE_IS_BROADCAST(mac_addr)) {
1887 DEBUGOUT("MAC address is broadcast\n");
1888 status = IXGBE_ERR_INVALID_MAC_ADDR;
1889 /* Reject the zero address */
1890 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
1891 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
1892 DEBUGOUT("MAC address is all zeros\n");
1893 status = IXGBE_ERR_INVALID_MAC_ADDR;
1894 }
1895 return status;
1896 }
1897
1898 /**
1899 * ixgbe_set_rar_generic - Set Rx address register
1900 * @hw: pointer to hardware structure
1901 * @index: Receive address register to write
1902 * @addr: Address to put into receive address register
1903 * @vmdq: VMDq "set" or "pool" index
1904 * @enable_addr: set flag that address is active
1905 *
1906 * Puts an ethernet address into a receive address register.
1907 **/
ixgbe_set_rar_generic(struct ixgbe_hw * hw,uint32_t index,uint8_t * addr,uint32_t vmdq,uint32_t enable_addr)1908 int32_t ixgbe_set_rar_generic(struct ixgbe_hw *hw, uint32_t index, uint8_t *addr,
1909 uint32_t vmdq, uint32_t enable_addr)
1910 {
1911 uint32_t rar_low, rar_high;
1912 uint32_t rar_entries = hw->mac.num_rar_entries;
1913
1914 DEBUGFUNC("ixgbe_set_rar_generic");
1915
1916 /* Make sure we are using a valid rar index range */
1917 if (index >= rar_entries) {
1918 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
1919 "RAR index %d is out of range.\n", index);
1920 return IXGBE_ERR_INVALID_ARGUMENT;
1921 }
1922
1923 /* setup VMDq pool selection before this RAR gets enabled */
1924 hw->mac.ops.set_vmdq(hw, index, vmdq);
1925
1926 /*
1927 * HW expects these in little endian so we reverse the byte
1928 * order from network order (big endian) to little endian
1929 */
1930 rar_low = ((uint32_t)addr[0] |
1931 ((uint32_t)addr[1] << 8) |
1932 ((uint32_t)addr[2] << 16) |
1933 ((uint32_t)addr[3] << 24));
1934 /*
1935 * Some parts put the VMDq setting in the extra RAH bits,
1936 * so save everything except the lower 16 bits that hold part
1937 * of the address and the address valid bit.
1938 */
1939 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1940 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1941 rar_high |= ((uint32_t)addr[4] | ((uint32_t)addr[5] << 8));
1942
1943 if (enable_addr != 0)
1944 rar_high |= IXGBE_RAH_AV;
1945
1946 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
1947 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1948
1949 return IXGBE_SUCCESS;
1950 }
1951
1952 /**
1953 * ixgbe_clear_rar_generic - Remove Rx address register
1954 * @hw: pointer to hardware structure
1955 * @index: Receive address register to write
1956 *
1957 * Clears an ethernet address from a receive address register.
1958 **/
ixgbe_clear_rar_generic(struct ixgbe_hw * hw,uint32_t index)1959 int32_t ixgbe_clear_rar_generic(struct ixgbe_hw *hw, uint32_t index)
1960 {
1961 uint32_t rar_high;
1962 uint32_t rar_entries = hw->mac.num_rar_entries;
1963
1964 DEBUGFUNC("ixgbe_clear_rar_generic");
1965
1966 /* Make sure we are using a valid rar index range */
1967 if (index >= rar_entries) {
1968 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
1969 "RAR index %d is out of range.\n", index);
1970 return IXGBE_ERR_INVALID_ARGUMENT;
1971 }
1972
1973 /*
1974 * Some parts put the VMDq setting in the extra RAH bits,
1975 * so save everything except the lower 16 bits that hold part
1976 * of the address and the address valid bit.
1977 */
1978 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1979 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1980
1981 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
1982 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1983
1984 /* clear VMDq pool/queue selection for this RAR */
1985 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
1986
1987 return IXGBE_SUCCESS;
1988 }
1989
1990 /**
1991 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
1992 * @hw: pointer to hardware structure
1993 *
1994 * Places the MAC address in receive address register 0 and clears the rest
1995 * of the receive address registers. Clears the multicast table. Assumes
1996 * the receiver is in reset when the routine is called.
1997 **/
int32_t ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	uint32_t i;
	uint32_t rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_init_rx_addrs_generic");

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
	    IXGBE_ERR_INVALID_MAC_ADDR) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);
	} else {
		/* Setup the receive address. */
		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);

		/* Program RAR0 with the (overridden) address, marked valid */
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
	}

	/* clear VMDq pool/queue selection for RAR 0 */
	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);

	hw->addr_ctrl.overflow_promisc = 0;

	/* RAR0 now holds the primary MAC, so one entry is in use */
	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA */
	hw->addr_ctrl.mta_in_use = 0;
	/* program filter type only; MFE stays off until addresses are added */
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	DEBUGOUT(" Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	ixgbe_init_uta_tables(hw);

	return IXGBE_SUCCESS;
}
2058
2059 /**
2060 * ixgbe_add_uc_addr - Adds a secondary unicast address.
2061 * @hw: pointer to hardware structure
2062 * @addr: new address
2063 * @vmdq: VMDq "set" or "pool" index
2064 *
2065 * Adds it to unused receive address register or goes into promiscuous mode.
2066 **/
void ixgbe_add_uc_addr(struct ixgbe_hw *hw, uint8_t *addr, uint32_t vmdq)
{
	uint32_t slot;

	DEBUGFUNC("ixgbe_add_uc_addr");

	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
		  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	/*
	 * Take the next free RAR slot if one remains; otherwise record
	 * the overflow so the caller can fall back to promiscuous mode.
	 */
	if (hw->addr_ctrl.rar_used_count >= hw->mac.num_rar_entries) {
		hw->addr_ctrl.overflow_promisc++;
	} else {
		slot = hw->addr_ctrl.rar_used_count;
		hw->mac.ops.set_rar(hw, slot, addr, vmdq, IXGBE_RAH_AV);
		DEBUGOUT1("Added a secondary address to RAR[%d]\n", slot);
		hw->addr_ctrl.rar_used_count++;
	}

	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
}
2092
2093 /**
2094 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
2095 * @hw: pointer to hardware structure
2096 * @mc_addr: the multicast address
2097 *
2098 * Extracts the 12 bits, from a multicast address, to determine which
2099 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
2100 * incoming rx multicast addresses, to determine the bit-vector to check in
2101 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
2102 * by the MO field of the MCSTCTRL. The MO field is set during initialization
2103 * to mc_filter_type.
2104 **/
ixgbe_mta_vector(struct ixgbe_hw * hw,uint8_t * mc_addr)2105 int32_t ixgbe_mta_vector(struct ixgbe_hw *hw, uint8_t *mc_addr)
2106 {
2107 uint32_t vector = 0;
2108
2109 DEBUGFUNC("ixgbe_mta_vector");
2110
2111 switch (hw->mac.mc_filter_type) {
2112 case 0: /* use bits [47:36] of the address */
2113 vector = ((mc_addr[4] >> 4) | (((uint16_t)mc_addr[5]) << 4));
2114 break;
2115 case 1: /* use bits [46:35] of the address */
2116 vector = ((mc_addr[4] >> 3) | (((uint16_t)mc_addr[5]) << 5));
2117 break;
2118 case 2: /* use bits [45:34] of the address */
2119 vector = ((mc_addr[4] >> 2) | (((uint16_t)mc_addr[5]) << 6));
2120 break;
2121 case 3: /* use bits [43:32] of the address */
2122 vector = ((mc_addr[4]) | (((uint16_t)mc_addr[5]) << 8));
2123 break;
2124 default: /* Invalid mc_filter_type */
2125 DEBUGOUT("MC filter type param set incorrectly\n");
2126 panic("incorrect multicast filter type");
2127 break;
2128 }
2129
2130 /* vector can only be 12-bits or boundary will be exceeded */
2131 vector &= 0xFFF;
2132 return vector;
2133 }
2134
2135 /**
2136 * ixgbe_set_mta - Set bit-vector in multicast table
2137 * @hw: pointer to hardware structure
2138 * @mc_addr: Multicast address
2139 *
2140 * Sets the bit-vector in the multicast table.
2141 **/
void ixgbe_set_mta(struct ixgbe_hw *hw, uint8_t *mc_addr)
{
	uint32_t vector, reg_idx, bit_idx;

	DEBUGFUNC("ixgbe_set_mta");

	hw->addr_ctrl.mta_in_use++;

	vector = ixgbe_mta_vector(hw, mc_addr);
	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);

	/*
	 * The MTA is 128 32-bit registers treated as one 4096-bit array:
	 * bits [11:5] of the vector select the register, bits [4:0] the
	 * bit within it. Only the software shadow is updated here; the
	 * caller flushes the shadow to hardware.
	 */
	reg_idx = (vector >> 5) & 0x7F;
	bit_idx = vector & 0x1F;
	hw->mac.mta_shadow[reg_idx] |= (1 << bit_idx);
}
2168
2169 /**
2170 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2171 * @hw: pointer to hardware structure
2172 * @mc_addr_list: the list of new multicast addresses
2173 * @mc_addr_count: number of addresses
2174 * @next: iterator function to walk the multicast address list
2175 * @clear: flag, when set clears the table beforehand
2176 *
2177 * When the clear flag is set, the given list replaces any existing list.
2178 * Hashes the given addresses into the multicast table.
2179 **/
ixgbe_update_mc_addr_list_generic(struct ixgbe_hw * hw,uint8_t * mc_addr_list,uint32_t mc_addr_count,ixgbe_mc_addr_itr next,bool clear)2180 int32_t ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, uint8_t *mc_addr_list,
2181 uint32_t mc_addr_count, ixgbe_mc_addr_itr next,
2182 bool clear)
2183 {
2184 uint32_t i;
2185 uint32_t vmdq;
2186
2187 DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2188
2189 /*
2190 * Set the new number of MC addresses that we are being requested to
2191 * use.
2192 */
2193 hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2194 hw->addr_ctrl.mta_in_use = 0;
2195
2196 /* Clear mta_shadow */
2197 if (clear) {
2198 DEBUGOUT(" Clearing MTA\n");
2199 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2200 }
2201
2202 /* Update mta_shadow */
2203 for (i = 0; i < mc_addr_count; i++) {
2204 DEBUGOUT(" Adding the multicast addresses:\n");
2205 ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
2206 }
2207
2208 /* Enable mta */
2209 for (i = 0; i < hw->mac.mcft_size; i++)
2210 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2211 hw->mac.mta_shadow[i]);
2212
2213 if (hw->addr_ctrl.mta_in_use > 0)
2214 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2215 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2216
2217 DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2218 return IXGBE_SUCCESS;
2219 }
2220
2221 /**
2222 * ixgbe_enable_mc_generic - Enable multicast address in RAR
2223 * @hw: pointer to hardware structure
2224 *
2225 * Enables multicast address in RAR and the use of the multicast hash table.
2226 **/
ixgbe_enable_mc_generic(struct ixgbe_hw * hw)2227 int32_t ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2228 {
2229 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2230
2231 DEBUGFUNC("ixgbe_enable_mc_generic");
2232
2233 if (a->mta_in_use > 0)
2234 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2235 hw->mac.mc_filter_type);
2236
2237 return IXGBE_SUCCESS;
2238 }
2239
2240 /**
2241 * ixgbe_disable_mc_generic - Disable multicast address in RAR
2242 * @hw: pointer to hardware structure
2243 *
2244 * Disables multicast address in RAR and the use of the multicast hash table.
2245 **/
ixgbe_disable_mc_generic(struct ixgbe_hw * hw)2246 int32_t ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2247 {
2248 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2249
2250 DEBUGFUNC("ixgbe_disable_mc_generic");
2251
2252 if (a->mta_in_use > 0)
2253 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2254
2255 return IXGBE_SUCCESS;
2256 }
2257
2258 /**
2259 * ixgbe_fc_enable_generic - Enable flow control
2260 * @hw: pointer to hardware structure
2261 *
2262 * Enable flow control according to the current settings.
2263 **/
int32_t ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
	int32_t ret_val = IXGBE_SUCCESS;
	uint32_t mflcn_reg, fccfg_reg;
	uint32_t reg;
	uint32_t fcrtl, fcrth;
	int i;

	DEBUGFUNC("ixgbe_fc_enable_generic");

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			/* low mark must be nonzero and below the high mark */
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				DEBUGOUT("Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}

	/* Negotiate the fc mode to use */
	hw->mac.ops.fc_autoneg(hw);

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	default:
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
			     "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	/* Set 802.3x based flow control settings. */
	mflcn_reg |= IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);


	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			/* NOTE(review): the << 10 suggests water marks are
			 * kept in KB and converted to bytes here — confirm
			 * against the fc struct definition. */
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the Rx packet buffer size - 24KB.  This allows
			 * the Tx switch to function even under heavy Rx
			 * workloads.
			 */
			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 0x6000;
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	/* Configure pause time (2 TCs per register) */
	reg = (uint32_t)hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}
2390
2391 /**
2392 * ixgbe_negotiate_fc - Negotiate flow control
2393 * @hw: pointer to hardware structure
2394 * @adv_reg: flow control advertised settings
2395 * @lp_reg: link partner's flow control settings
2396 * @adv_sym: symmetric pause bit in advertisement
2397 * @adv_asm: asymmetric pause bit in advertisement
2398 * @lp_sym: symmetric pause bit in link partner advertisement
2399 * @lp_asm: asymmetric pause bit in link partner advertisement
2400 *
2401 * Find the intersection between advertised settings and link partner's
2402 * advertised settings
2403 **/
ixgbe_negotiate_fc(struct ixgbe_hw * hw,uint32_t adv_reg,uint32_t lp_reg,uint32_t adv_sym,uint32_t adv_asm,uint32_t lp_sym,uint32_t lp_asm)2404 int32_t ixgbe_negotiate_fc(struct ixgbe_hw *hw, uint32_t adv_reg,
2405 uint32_t lp_reg, uint32_t adv_sym,
2406 uint32_t adv_asm, uint32_t lp_sym,
2407 uint32_t lp_asm)
2408 {
2409 if ((!(adv_reg)) || (!(lp_reg))) {
2410 ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
2411 "Local or link partner's advertised flow control "
2412 "settings are NULL. Local: %x, link partner: %x\n",
2413 adv_reg, lp_reg);
2414 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2415 }
2416
2417 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2418 /*
2419 * Now we need to check if the user selected Rx ONLY
2420 * of pause frames. In this case, we had to advertise
2421 * FULL flow control because we could not advertise RX
2422 * ONLY. Hence, we must now check to see if we need to
2423 * turn OFF the TRANSMISSION of PAUSE frames.
2424 */
2425 if (hw->fc.requested_mode == ixgbe_fc_full) {
2426 hw->fc.current_mode = ixgbe_fc_full;
2427 DEBUGOUT("Flow Control = FULL.\n");
2428 } else {
2429 hw->fc.current_mode = ixgbe_fc_rx_pause;
2430 DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2431 }
2432 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2433 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2434 hw->fc.current_mode = ixgbe_fc_tx_pause;
2435 DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2436 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2437 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2438 hw->fc.current_mode = ixgbe_fc_rx_pause;
2439 DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2440 } else {
2441 hw->fc.current_mode = ixgbe_fc_none;
2442 DEBUGOUT("Flow Control = NONE.\n");
2443 }
2444 return IXGBE_SUCCESS;
2445 }
2446
2447 /**
2448 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2449 * @hw: pointer to hardware structure
2450 *
2451 * Enable flow control according on 1 gig fiber.
2452 **/
ixgbe_fc_autoneg_fiber(struct ixgbe_hw * hw)2453 int32_t ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2454 {
2455 uint32_t pcs_anadv_reg, pcs_lpab_reg, linkstat;
2456 int32_t ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2457
2458 /*
2459 * On multispeed fiber at 1g, bail out if
2460 * - link is up but AN did not complete, or if
2461 * - link is up and AN completed but timed out
2462 */
2463
2464 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2465 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2466 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
2467 DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
2468 goto out;
2469 }
2470
2471 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2472 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2473
2474 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2475 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2476 IXGBE_PCS1GANA_ASM_PAUSE,
2477 IXGBE_PCS1GANA_SYM_PAUSE,
2478 IXGBE_PCS1GANA_ASM_PAUSE);
2479
2480 out:
2481 return ret_val;
2482 }
2483
2484 /**
2485 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2486 * @hw: pointer to hardware structure
2487 *
2488 * Enable flow control according to IEEE clause 37.
2489 **/
ixgbe_fc_autoneg_backplane(struct ixgbe_hw * hw)2490 int32_t ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2491 {
2492 uint32_t links2, anlp1_reg, autoc_reg, links;
2493 int32_t ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2494
2495 /*
2496 * On backplane, bail out if
2497 * - backplane autoneg was not completed, or if
2498 * - we are 82599 and link partner is not AN enabled
2499 */
2500 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2501 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
2502 DEBUGOUT("Auto-Negotiation did not complete\n");
2503 goto out;
2504 }
2505
2506 if (hw->mac.type == ixgbe_mac_82599EB) {
2507 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2508 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
2509 DEBUGOUT("Link partner is not AN enabled\n");
2510 goto out;
2511 }
2512 }
2513 /*
2514 * Read the 10g AN autoc and LP ability registers and resolve
2515 * local flow control settings accordingly
2516 */
2517 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2518 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2519
2520 ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2521 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2522 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2523
2524 out:
2525 return ret_val;
2526 }
2527
2528 /**
2529 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2530 * @hw: pointer to hardware structure
2531 *
2532 * Enable flow control according to IEEE clause 37.
2533 **/
ixgbe_fc_autoneg_copper(struct ixgbe_hw * hw)2534 int32_t ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2535 {
2536 uint16_t technology_ability_reg = 0;
2537 uint16_t lp_technology_ability_reg = 0;
2538
2539 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2540 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2541 &technology_ability_reg);
2542 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
2543 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2544 &lp_technology_ability_reg);
2545
2546 return ixgbe_negotiate_fc(hw, (uint32_t)technology_ability_reg,
2547 (uint32_t)lp_technology_ability_reg,
2548 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2549 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2550 }
2551
2552 /**
2553 * ixgbe_fc_autoneg - Configure flow control
2554 * @hw: pointer to hardware structure
2555 *
2556 * Compares our advertised flow control capabilities to those advertised by
2557 * our link partner, and determines the proper flow control mode to use.
2558 **/
ixgbe_fc_autoneg(struct ixgbe_hw * hw)2559 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2560 {
2561 int32_t ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2562 ixgbe_link_speed speed;
2563 bool link_up;
2564
2565 DEBUGFUNC("ixgbe_fc_autoneg");
2566
2567 /*
2568 * AN should have completed when the cable was plugged in.
2569 * Look for reasons to bail out. Bail out if:
2570 * - FC autoneg is disabled, or if
2571 * - link is not up.
2572 */
2573 if (hw->fc.disable_fc_autoneg) {
2574 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
2575 "Flow control autoneg is disabled");
2576 goto out;
2577 }
2578
2579 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
2580 if (!link_up) {
2581 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
2582 goto out;
2583 }
2584
2585 switch (hw->phy.media_type) {
2586 /* Autoneg flow control on fiber adapters */
2587 case ixgbe_media_type_fiber_fixed:
2588 case ixgbe_media_type_fiber_qsfp:
2589 case ixgbe_media_type_fiber:
2590 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
2591 ret_val = ixgbe_fc_autoneg_fiber(hw);
2592 break;
2593
2594 /* Autoneg flow control on backplane adapters */
2595 case ixgbe_media_type_backplane:
2596 ret_val = ixgbe_fc_autoneg_backplane(hw);
2597 break;
2598
2599 /* Autoneg flow control on copper adapters */
2600 case ixgbe_media_type_copper:
2601 if (ixgbe_device_supports_autoneg_fc(hw))
2602 ret_val = ixgbe_fc_autoneg_copper(hw);
2603 break;
2604
2605 default:
2606 break;
2607 }
2608
2609 out:
2610 if (ret_val == IXGBE_SUCCESS) {
2611 hw->fc.fc_was_autonegged = TRUE;
2612 } else {
2613 hw->fc.fc_was_autonegged = FALSE;
2614 hw->fc.current_mode = hw->fc.requested_mode;
2615 }
2616 }
2617
2618 /*
2619 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
2620 * @hw: pointer to hardware structure
2621 *
2622 * System-wide timeout range is encoded in PCIe Device Control2 register.
2623 *
2624 * Add 10% to specified maximum and return the number of times to poll for
2625 * completion timeout, in units of 100 microsec. Never return less than
2626 * 800 = 80 millisec.
2627 */
ixgbe_pcie_timeout_poll(struct ixgbe_hw * hw)2628 static uint32_t ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
2629 {
2630 int16_t devctl2;
2631 uint32_t pollcnt;
2632
2633 devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
2634 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
2635
2636 switch (devctl2) {
2637 case IXGBE_PCIDEVCTRL2_65_130ms:
2638 pollcnt = 1300; /* 130 millisec */
2639 break;
2640 case IXGBE_PCIDEVCTRL2_260_520ms:
2641 pollcnt = 5200; /* 520 millisec */
2642 break;
2643 case IXGBE_PCIDEVCTRL2_1_2s:
2644 pollcnt = 20000; /* 2 sec */
2645 break;
2646 case IXGBE_PCIDEVCTRL2_4_8s:
2647 pollcnt = 80000; /* 8 sec */
2648 break;
2649 case IXGBE_PCIDEVCTRL2_17_34s:
2650 pollcnt = 34000; /* 34 sec */
2651 break;
2652 case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */
2653 case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */
2654 case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */
2655 case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */
2656 default:
2657 pollcnt = 800; /* 80 millisec minimum */
2658 break;
2659 }
2660
2661 /* add 10% to spec maximum */
2662 return (pollcnt * 11) / 10;
2663 }
2664
2665 /**
2666 * ixgbe_disable_pcie_master - Disable PCI-express master access
2667 * @hw: pointer to hardware structure
2668 *
2669 * Disables PCI-Express master access and verifies there are no pending
2670 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
2671 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
2672 * is returned signifying master requests disabled.
2673 **/
int32_t ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
	int32_t status = IXGBE_SUCCESS;
	uint32_t i, poll;
	uint16_t value;

	DEBUGFUNC("ixgbe_disable_pcie_master");

	/* Always set this bit to ensure any future transactions are blocked */
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);

	/* Exit if master requests are blocked */
	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
	    IXGBE_REMOVED(hw->hw_addr))
		goto out;

	/* Poll for master request bit to clear */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		usec_delay(100);
		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
			goto out;
	}

	/*
	 * Two consecutive resets are required via CTRL.RST per datasheet
	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
	 * of this need.  The first reset prevents new master requests from
	 * being issued by our device.  We then must wait 1usec or more for any
	 * remaining completions from the PCIe bus to trickle in, and then reset
	 * again to clear out any effects they may have had on our device.
	 */
	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

	/* X550 and newer skip the PCIe transaction-pending poll below */
	if (hw->mac.type >= ixgbe_mac_X550)
		goto out;

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usec_delay(100);
		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
		/* stop polling if the device has been hot-removed */
		if (IXGBE_REMOVED(hw->hw_addr))
			goto out;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

	ERROR_REPORT1(IXGBE_ERROR_POLLING,
		     "PCIe transaction pending bit also did not clear.\n");
	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;

out:
	return status;
}
2732
2733 /**
2734 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
2735 * @hw: pointer to hardware structure
2736 * @mask: Mask to specify which semaphore to acquire
2737 *
2738 * Acquires the SWFW semaphore through the GSSR register for the specified
2739 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2740 **/
int32_t ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, uint32_t mask)
{
	uint32_t gssr = 0;
	uint32_t swmask = mask;
	uint32_t fwmask = mask << 5;	/* FW ownership bits sit 5 above SW's */
	uint32_t timeout = 200;		/* 200 attempts, 5 ms apart */
	uint32_t i;

	DEBUGFUNC("ixgbe_acquire_swfw_sync");

	for (i = 0; i < timeout; i++) {
		/*
		 * SW NVM semaphore bit is used for access to all
		 * SW_FW_SYNC bits (not just NVM)
		 */
		if (ixgbe_get_eeprom_semaphore(hw))
			return IXGBE_ERR_SWFW_SYNC;

		/* claim the resource only if neither FW nor SW holds it */
		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
		if (!(gssr & (fwmask | swmask))) {
			gssr |= swmask;
			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
			ixgbe_release_eeprom_semaphore(hw);
			return IXGBE_SUCCESS;
		} else {
			/* Resource is currently in use by FW or SW */
			ixgbe_release_eeprom_semaphore(hw);
			msec_delay(5);
		}
	}

	/* If time expired clear the bits holding the lock and retry */
	if (gssr & (fwmask | swmask))
		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));

	msec_delay(5);
	return IXGBE_ERR_SWFW_SYNC;
}
2779
2780 /**
2781 * ixgbe_release_swfw_sync - Release SWFW semaphore
2782 * @hw: pointer to hardware structure
2783 * @mask: Mask to specify which semaphore to release
2784 *
2785 * Releases the SWFW semaphore through the GSSR register for the specified
2786 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2787 **/
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, uint32_t mask)
{
	uint32_t gssr;

	DEBUGFUNC("ixgbe_release_swfw_sync");

	/* The EEPROM semaphore guards every GSSR read-modify-write. */
	ixgbe_get_eeprom_semaphore(hw);

	/* Drop only the requested SW ownership bits. */
	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr & ~mask);

	ixgbe_release_eeprom_semaphore(hw);
}
2803
2804 /**
2805 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
2806 * @hw: pointer to hardware structure
2807 *
2808 * Stops the receive data path and waits for the HW to internally empty
2809 * the Rx security block
2810 **/
ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw * hw)2811 int32_t ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
2812 {
2813 #define IXGBE_MAX_SECRX_POLL 40
2814
2815 int i;
2816 int secrxreg;
2817
2818 DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
2819
2820
2821 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2822 secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
2823 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2824 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
2825 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
2826 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
2827 break;
2828 else
2829 /* Use interrupt-safe sleep just in case */
2830 usec_delay(1000);
2831 }
2832
2833 /* For informational purposes only */
2834 if (i >= IXGBE_MAX_SECRX_POLL)
2835 DEBUGOUT("Rx unit being enabled before security "
2836 "path fully disabled. Continuing with init.\n");
2837
2838 return IXGBE_SUCCESS;
2839 }
2840
2841 /**
2842 * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
2843 * @hw: pointer to hardware structure
2844 * @locked: bool to indicate whether the SW/FW lock was taken
2845 * @reg_val: Value we read from AUTOC
2846 *
2847 * The default case requires no protection so just to the register read.
2848 */
prot_autoc_read_generic(struct ixgbe_hw * hw,bool * locked,uint32_t * reg_val)2849 int32_t prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked,
2850 uint32_t *reg_val)
2851 {
2852 *locked = FALSE;
2853 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2854 return IXGBE_SUCCESS;
2855 }
2856
2857 /**
2858 * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
2859 * @hw: pointer to hardware structure
2860 * @reg_val: value to write to AUTOC
2861 * @locked: bool to indicate whether the SW/FW lock was already taken by
2862 * previous read.
2863 *
2864 * The default case requires no protection so just to the register write.
2865 */
int32_t prot_autoc_write_generic(struct ixgbe_hw *hw, uint32_t reg_val,
				 bool locked)
{
	/* Generic MACs need no SW/FW lock; write AUTOC directly.
	 * 'locked' is accepted for interface parity and ignored here. */
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
	return IXGBE_SUCCESS;
}
2872
2873 /**
2874 * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
2875 * @hw: pointer to hardware structure
2876 *
2877 * Enables the receive data path.
2878 **/
ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw * hw)2879 int32_t ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
2880 {
2881 uint32_t secrxreg;
2882
2883 DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
2884
2885 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2886 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
2887 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2888 IXGBE_WRITE_FLUSH(hw);
2889
2890 return IXGBE_SUCCESS;
2891 }
2892
2893 /**
2894 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
2895 * @hw: pointer to hardware structure
2896 * @regval: register value to write to RXCTRL
2897 *
2898 * Enables the Rx DMA unit
2899 **/
ixgbe_enable_rx_dma_generic(struct ixgbe_hw * hw,uint32_t regval)2900 int32_t ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, uint32_t regval)
2901 {
2902 DEBUGFUNC("ixgbe_enable_rx_dma_generic");
2903
2904 if (regval & IXGBE_RXCTRL_RXEN)
2905 ixgbe_enable_rx(hw);
2906 else
2907 ixgbe_disable_rx(hw);
2908
2909 return IXGBE_SUCCESS;
2910 }
2911
2912 /**
2913 * ixgbe_blink_led_start_generic - Blink LED based on index.
2914 * @hw: pointer to hardware structure
2915 * @index: led number to blink
2916 **/
ixgbe_blink_led_start_generic(struct ixgbe_hw * hw,uint32_t index)2917 int32_t ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, uint32_t index)
2918 {
2919 ixgbe_link_speed speed = 0;
2920 bool link_up = 0;
2921 uint32_t autoc_reg = 0;
2922 uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2923 int32_t ret_val = IXGBE_SUCCESS;
2924 bool locked = FALSE;
2925
2926 DEBUGFUNC("ixgbe_blink_led_start_generic");
2927
2928 if (index > 3)
2929 return IXGBE_ERR_PARAM;
2930
2931 /*
2932 * Link must be up to auto-blink the LEDs;
2933 * Force it if link is down.
2934 */
2935 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
2936
2937 if (!link_up) {
2938 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
2939 if (ret_val != IXGBE_SUCCESS)
2940 goto out;
2941
2942 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2943 autoc_reg |= IXGBE_AUTOC_FLU;
2944
2945 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
2946 if (ret_val != IXGBE_SUCCESS)
2947 goto out;
2948
2949 IXGBE_WRITE_FLUSH(hw);
2950 msec_delay(10);
2951 }
2952
2953 led_reg &= ~IXGBE_LED_MODE_MASK(index);
2954 led_reg |= IXGBE_LED_BLINK(index);
2955 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2956 IXGBE_WRITE_FLUSH(hw);
2957
2958 out:
2959 return ret_val;
2960 }
2961
2962 /**
2963 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
2964 * @hw: pointer to hardware structure
2965 * @index: led number to stop blinking
2966 **/
ixgbe_blink_led_stop_generic(struct ixgbe_hw * hw,uint32_t index)2967 int32_t ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, uint32_t index)
2968 {
2969 uint32_t autoc_reg = 0;
2970 uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2971 int32_t ret_val = IXGBE_SUCCESS;
2972 bool locked = FALSE;
2973
2974 DEBUGFUNC("ixgbe_blink_led_stop_generic");
2975
2976 if (index > 3)
2977 return IXGBE_ERR_PARAM;
2978
2979 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
2980 if (ret_val != IXGBE_SUCCESS)
2981 goto out;
2982
2983 autoc_reg &= ~IXGBE_AUTOC_FLU;
2984 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2985
2986 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
2987 if (ret_val != IXGBE_SUCCESS)
2988 goto out;
2989
2990 led_reg &= ~IXGBE_LED_MODE_MASK(index);
2991 led_reg &= ~IXGBE_LED_BLINK(index);
2992 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
2993 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2994 IXGBE_WRITE_FLUSH(hw);
2995
2996 out:
2997 return ret_val;
2998 }
2999
3000 /**
3001 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3002 * @hw: pointer to hardware structure
3003 *
3004 * Read PCIe configuration space, and get the MSI-X vector count from
3005 * the capabilities table.
3006 **/
ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw * hw)3007 uint16_t ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3008 {
3009 uint16_t msix_count = 1;
3010 uint16_t max_msix_count;
3011 uint16_t pcie_offset;
3012
3013 switch (hw->mac.type) {
3014 case ixgbe_mac_82598EB:
3015 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3016 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3017 break;
3018 case ixgbe_mac_82599EB:
3019 case ixgbe_mac_X540:
3020 case ixgbe_mac_X550:
3021 case ixgbe_mac_X550EM_x:
3022 case ixgbe_mac_X550EM_a:
3023 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3024 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3025 break;
3026 default:
3027 return msix_count;
3028 }
3029
3030 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3031 msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3032 if (IXGBE_REMOVED(hw->hw_addr))
3033 msix_count = 0;
3034 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3035
3036 /* MSI-X count is zero-based in HW */
3037 msix_count++;
3038
3039 if (msix_count > max_msix_count)
3040 msix_count = max_msix_count;
3041
3042 return msix_count;
3043 }
3044
/**
 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
 * @hw: pointer to hardware structure
 * @addr: Address to put into receive address register (6 bytes)
 * @vmdq: VMDq pool to assign
 *
 * Puts an ethernet address into a receive address register, or
 * finds the rar that it is already in; adds to the pool list.
 *
 * Returns the RAR index used (>= 0) on success, or the negative
 * IXGBE_ERR_INVALID_MAC_ADDR when no RAR is available.
 **/
int32_t ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, uint8_t *addr, uint32_t vmdq)
{
	static const uint32_t NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
	uint32_t first_empty_rar = NO_EMPTY_RAR_FOUND;
	uint32_t rar;
	uint32_t rar_low, rar_high;
	uint32_t addr_low, addr_high;

	DEBUGFUNC("ixgbe_insert_mac_addr_generic");

	/* swap bytes for HW little endian: RAL holds addr[0..3], RAH[15:0]
	 * holds addr[4..5] */
	addr_low = addr[0] | (addr[1] << 8)
		 | (addr[2] << 16)
		 | (addr[3] << 24);
	addr_high = addr[4] | (addr[5] << 8);

	/*
	 * Either find the mac_id in rar or find the first empty space.
	 * rar_highwater points to just after the highest currently used
	 * rar in order to shorten the search.  It grows when we add a new
	 * rar to the top.
	 */
	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));

		/* remember the first disabled (AV clear) slot seen */
		if (((IXGBE_RAH_AV & rar_high) == 0)
		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
			first_empty_rar = rar;
		} else if ((rar_high & 0xFFFF) == addr_high) {
			/* high halves match; compare the low 4 bytes too */
			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
			if (rar_low == addr_low)
				break;	/* found it already in the rars */
		}
	}

	if (rar < hw->mac.rar_highwater) {
		/* already there so just add to the pool bits */
		ixgbe_set_vmdq(hw, rar, vmdq);
	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
		/* stick it into first empty RAR slot we found */
		rar = first_empty_rar;
		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
	} else if (rar == hw->mac.rar_highwater) {
		/* add it to the top of the list and inc the highwater mark.
		 * NOTE(review): this branch is tested before the range check
		 * below, so if rar_highwater ever equals num_rar_entries the
		 * write would be out of range -- confirm highwater is always
		 * kept below num_rar_entries by callers. */
		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
		hw->mac.rar_highwater++;
	} else if (rar >= hw->mac.num_rar_entries) {
		return IXGBE_ERR_INVALID_MAC_ADDR;
	}

	/*
	 * If we found rar[0], make sure the default pool bit (we use pool 0)
	 * remains cleared to be sure default pool packets will get delivered
	 */
	if (rar == 0)
		ixgbe_clear_vmdq(hw, rar, 0);

	return rar;
}
3113
3114 /**
3115 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3116 * @hw: pointer to hardware struct
3117 * @rar: receive address register index to disassociate
3118 * @vmdq: VMDq pool index to remove from the rar
3119 **/
ixgbe_clear_vmdq_generic(struct ixgbe_hw * hw,uint32_t rar,uint32_t vmdq)3120 int32_t ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
3121 {
3122 uint32_t mpsar_lo, mpsar_hi;
3123 uint32_t rar_entries = hw->mac.num_rar_entries;
3124
3125 DEBUGFUNC("ixgbe_clear_vmdq_generic");
3126
3127 /* Make sure we are using a valid rar index range */
3128 if (rar >= rar_entries) {
3129 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3130 "RAR index %d is out of range.\n", rar);
3131 return IXGBE_ERR_INVALID_ARGUMENT;
3132 }
3133
3134 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3135 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3136
3137 if (IXGBE_REMOVED(hw->hw_addr))
3138 goto done;
3139
3140 if (!mpsar_lo && !mpsar_hi)
3141 goto done;
3142
3143 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3144 if (mpsar_lo) {
3145 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3146 mpsar_lo = 0;
3147 }
3148 if (mpsar_hi) {
3149 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3150 mpsar_hi = 0;
3151 }
3152 } else if (vmdq < 32) {
3153 mpsar_lo &= ~(1 << vmdq);
3154 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3155 } else {
3156 mpsar_hi &= ~(1 << (vmdq - 32));
3157 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3158 }
3159
3160 /* was that the last pool using this rar? */
3161 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
3162 hw->mac.ops.clear_rar(hw, rar);
3163 done:
3164 return IXGBE_SUCCESS;
3165 }
3166
3167 /**
3168 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3169 * @hw: pointer to hardware struct
3170 * @rar: receive address register index to associate with a VMDq index
3171 * @vmdq: VMDq pool index
3172 **/
ixgbe_set_vmdq_generic(struct ixgbe_hw * hw,uint32_t rar,uint32_t vmdq)3173 int32_t ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
3174 {
3175 uint32_t mpsar;
3176 uint32_t rar_entries = hw->mac.num_rar_entries;
3177
3178 DEBUGFUNC("ixgbe_set_vmdq_generic");
3179
3180 /* Make sure we are using a valid rar index range */
3181 if (rar >= rar_entries) {
3182 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3183 "RAR index %d is out of range.\n", rar);
3184 return IXGBE_ERR_INVALID_ARGUMENT;
3185 }
3186
3187 if (vmdq < 32) {
3188 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3189 mpsar |= 1 << vmdq;
3190 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3191 } else {
3192 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3193 mpsar |= 1 << (vmdq - 32);
3194 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3195 }
3196 return IXGBE_SUCCESS;
3197 }
3198
3199 /**
3200 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3201 * @hw: pointer to hardware structure
3202 **/
ixgbe_init_uta_tables_generic(struct ixgbe_hw * hw)3203 int32_t ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3204 {
3205 int i;
3206
3207 DEBUGFUNC("ixgbe_init_uta_tables_generic");
3208 DEBUGOUT(" Clearing UTA\n");
3209
3210 for (i = 0; i < 128; i++)
3211 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3212
3213 return IXGBE_SUCCESS;
3214 }
3215
3216 /**
3217 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3218 * @hw: pointer to hardware structure
3219 * @vlan: VLAN id to write to VLAN filter
3220 * @vlvf_bypass: TRUE to find vlanid only, FALSE returns first empty slot if
3221 * vlanid not found
3222 *
3223 *
3224 * return the VLVF index where this VLAN id should be placed
3225 *
3226 **/
ixgbe_find_vlvf_slot(struct ixgbe_hw * hw,uint32_t vlan,bool vlvf_bypass)3227 int32_t ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, uint32_t vlan, bool vlvf_bypass)
3228 {
3229 int32_t regindex, first_empty_slot;
3230 uint32_t bits;
3231
3232 /* short cut the special case */
3233 if (vlan == 0)
3234 return 0;
3235
3236 /* if vlvf_bypass is set we don't want to use an empty slot, we
3237 * will simply bypass the VLVF if there are no entries present in the
3238 * VLVF that contain our VLAN
3239 */
3240 first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
3241
3242 /* add VLAN enable bit for comparison */
3243 vlan |= IXGBE_VLVF_VIEN;
3244
3245 /* Search for the vlan id in the VLVF entries. Save off the first empty
3246 * slot found along the way.
3247 *
3248 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
3249 */
3250 for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
3251 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3252 if (bits == vlan)
3253 return regindex;
3254 if (!first_empty_slot && !bits)
3255 first_empty_slot = regindex;
3256 }
3257
3258 /* If we are here then we didn't find the VLAN. Return first empty
3259 * slot we found during our search, else error.
3260 */
3261 if (!first_empty_slot)
3262 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n");
3263
3264 return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE;
3265 }
3266
3267 /**
3268 * ixgbe_set_vfta_generic - Set VLAN filter table
3269 * @hw: pointer to hardware structure
3270 * @vlan: VLAN id to write to VLAN filter
3271 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
3272 * @vlan_on: boolean flag to turn on/off VLAN
3273 * @vlvf_bypass: boolean flag indicating updating default pool is okay
3274 *
3275 * Turn on/off specified VLAN in the VLAN filter table.
3276 **/
ixgbe_set_vfta_generic(struct ixgbe_hw * hw,uint32_t vlan,uint32_t vind,bool vlan_on,bool vlvf_bypass)3277 int32_t ixgbe_set_vfta_generic(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind,
3278 bool vlan_on, bool vlvf_bypass)
3279 {
3280 uint32_t regidx, vfta_delta, vfta;
3281 int32_t ret_val;
3282
3283 DEBUGFUNC("ixgbe_set_vfta_generic");
3284
3285 if (vlan > 4095 || vind > 63)
3286 return IXGBE_ERR_PARAM;
3287
3288 /*
3289 * this is a 2 part operation - first the VFTA, then the
3290 * VLVF and VLVFB if VT Mode is set
3291 * We don't write the VFTA until we know the VLVF part succeeded.
3292 */
3293
3294 /* Part 1
3295 * The VFTA is a bitstring made up of 128 32-bit registers
3296 * that enable the particular VLAN id, much like the MTA:
3297 * bits[11-5]: which register
3298 * bits[4-0]: which bit in the register
3299 */
3300 regidx = vlan / 32;
3301 vfta_delta = 1 << (vlan % 32);
3302 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
3303
3304 /*
3305 * vfta_delta represents the difference between the current value
3306 * of vfta and the value we want in the register. Since the diff
3307 * is an XOR mask we can just update the vfta using an XOR
3308 */
3309 vfta_delta &= vlan_on ? ~vfta : vfta;
3310 vfta ^= vfta_delta;
3311
3312 /* Part 2
3313 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
3314 */
3315 ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta,
3316 vfta, vlvf_bypass);
3317 if (ret_val != IXGBE_SUCCESS) {
3318 if (vlvf_bypass)
3319 goto vfta_update;
3320 return ret_val;
3321 }
3322
3323 vfta_update:
3324 /* Update VFTA now that we are ready for traffic */
3325 if (vfta_delta)
3326 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);
3327
3328 return IXGBE_SUCCESS;
3329 }
3330
3331 /**
3332 * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
3333 * @hw: pointer to hardware structure
3334 * @vlan: VLAN id to write to VLAN filter
3335 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
3336 * @vlan_on: boolean flag to turn on/off VLAN in VLVF
3337 * @vfta_delta: pointer to the difference between the current value of VFTA
3338 * and the desired value
3339 * @vfta: the desired value of the VFTA
3340 * @vlvf_bypass: boolean flag indicating updating default pool is okay
3341 *
3342 * Turn on/off specified bit in VLVF table.
3343 **/
ixgbe_set_vlvf_generic(struct ixgbe_hw * hw,uint32_t vlan,uint32_t vind,bool vlan_on,uint32_t * vfta_delta,uint32_t vfta,bool vlvf_bypass)3344 int32_t ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind,
3345 bool vlan_on, uint32_t *vfta_delta, uint32_t vfta,
3346 bool vlvf_bypass)
3347 {
3348 uint32_t bits;
3349 int32_t vlvf_index;
3350
3351 DEBUGFUNC("ixgbe_set_vlvf_generic");
3352
3353 if (vlan > 4095 || vind > 63)
3354 return IXGBE_ERR_PARAM;
3355
3356 /* If VT Mode is set
3357 * Either vlan_on
3358 * make sure the vlan is in VLVF
3359 * set the vind bit in the matching VLVFB
3360 * Or !vlan_on
3361 * clear the pool bit and possibly the vind
3362 */
3363 if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
3364 return IXGBE_SUCCESS;
3365
3366 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
3367 if (vlvf_index < 0)
3368 return vlvf_index;
3369
3370 bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
3371
3372 /* set the pool bit */
3373 bits |= 1 << (vind % 32);
3374 if (vlan_on)
3375 goto vlvf_update;
3376
3377 /* clear the pool bit */
3378 bits ^= 1 << (vind % 32);
3379
3380 if (!bits &&
3381 !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
3382 /* Clear VFTA first, then disable VLVF. Otherwise
3383 * we run the risk of stray packets leaking into
3384 * the PF via the default pool
3385 */
3386 if (*vfta_delta)
3387 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta);
3388
3389 /* disable VLVF and clear remaining bit from pool */
3390 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
3391 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);
3392
3393 return IXGBE_SUCCESS;
3394 }
3395
3396 /* If there are still bits set in the VLVFB registers
3397 * for the VLAN ID indicated we need to see if the
3398 * caller is requesting that we clear the VFTA entry bit.
3399 * If the caller has requested that we clear the VFTA
3400 * entry bit but there are still pools/VFs using this VLAN
3401 * ID entry then ignore the request. We're not worried
3402 * about the case where we're turning the VFTA VLAN ID
3403 * entry bit on, only when requested to turn it off as
3404 * there may be multiple pools and/or VFs using the
3405 * VLAN ID entry. In that case we cannot clear the
3406 * VFTA bit until all pools/VFs using that VLAN ID have also
3407 * been cleared. This will be indicated by "bits" being
3408 * zero.
3409 */
3410 *vfta_delta = 0;
3411
3412 vlvf_update:
3413 /* record pool change and enable VLAN ID if not already enabled */
3414 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
3415 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);
3416
3417 return IXGBE_SUCCESS;
3418 }
3419
3420 /**
3421 * ixgbe_clear_vfta_generic - Clear VLAN filter table
3422 * @hw: pointer to hardware structure
3423 *
3424 * Clears the VLAN filer table, and the VMDq index associated with the filter
3425 **/
ixgbe_clear_vfta_generic(struct ixgbe_hw * hw)3426 int32_t ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3427 {
3428 uint32_t offset;
3429
3430 DEBUGFUNC("ixgbe_clear_vfta_generic");
3431
3432 for (offset = 0; offset < hw->mac.vft_size; offset++)
3433 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
3434
3435 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3436 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
3437 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
3438 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
3439 }
3440
3441 return IXGBE_SUCCESS;
3442 }
3443
3444 /**
3445 * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
3446 * @hw: pointer to hardware structure
3447 *
3448 * Contains the logic to identify if we need to verify link for the
3449 * crosstalk fix
3450 **/
ixgbe_need_crosstalk_fix(struct ixgbe_hw * hw)3451 bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
3452 {
3453
3454 /* Does FW say we need the fix */
3455 if (!hw->need_crosstalk_fix)
3456 return FALSE;
3457
3458 /* Only consider SFP+ PHYs i.e. media type fiber */
3459 switch (hw->mac.ops.get_media_type(hw)) {
3460 case ixgbe_media_type_fiber:
3461 case ixgbe_media_type_fiber_qsfp:
3462 break;
3463 default:
3464 return FALSE;
3465 }
3466
3467 return TRUE;
3468 }
3469
/**
 * ixgbe_check_mac_link_generic - Determine link and speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: TRUE when link is up
 * @link_up_wait_to_complete: bool used to wait for link up or not
 *
 * Reads the links register to determine if link is up and the current speed
 **/
int32_t ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
				     bool *link_up, bool link_up_wait_to_complete)
{
	uint32_t links_reg, links_orig;
	uint32_t i;

	DEBUGFUNC("ixgbe_check_mac_link_generic");

	/* If Crosstalk fix enabled do the sanity check of making sure
	 * the SFP+ cage is full.
	 */
	if (ixgbe_need_crosstalk_fix(hw)) {
		uint32_t sfp_cage_full;

		/* Module-present pin differs per MAC: SDP2 on 82599,
		 * SDP0 on X550EM variants. */
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
					IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
					IXGBE_ESDP_SDP0;
			break;
		default:
			/* sanity check - No SFP+ devices here */
			sfp_cage_full = FALSE;
			break;
		}

		/* Empty cage: report link down without consulting LINKS */
		if (!sfp_cage_full) {
			*link_up = FALSE;
			*speed = IXGBE_LINK_SPEED_UNKNOWN;
			return IXGBE_SUCCESS;
		}
	}

	/* clear the old state: first read may return stale latched status */
	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);

	if (links_orig != links_reg) {
		DEBUGOUT2("LINKS changed from %08X to %08X\n",
			  links_orig, links_reg);
	}

	if (link_up_wait_to_complete) {
		/* poll up to max_link_up_time * 100ms for link */
		for (i = 0; i < hw->mac.max_link_up_time; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = TRUE;
				break;
			} else {
				*link_up = FALSE;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = TRUE;
		else
			*link_up = FALSE;
	}

	/* Decode link speed; NON_STD modifies the reported rate on X550+ */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		if (hw->mac.type >= ixgbe_mac_X550) {
			/* 10G encoding + NON_STD means 2.5G on X550 */
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (hw->mac.type == ixgbe_mac_X550) {
			/* 100M encoding + NON_STD means 5G on X550 */
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_10_X550EM_A:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		/* Only the X550EM_A 1G copper parts report real 10 Mb/s */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
			*speed = IXGBE_LINK_SPEED_10_FULL;
		break;
	default:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
	}

	return IXGBE_SUCCESS;
}
3574
/**
 * ixgbe_get_device_caps_generic - Get additional device capabilities
 * @hw: pointer to hardware structure
 * @device_caps: the EEPROM word with the extra device capabilities
 *
 * This function will read the EEPROM location for the device capabilities,
 * and return the word through device_caps.
 *
 * NOTE(review): the eeprom read's return status is discarded here, so
 * *device_caps may be left unmodified on read failure -- callers should
 * initialize it; confirm whether this matches their expectations.
 **/
int32_t ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, uint16_t *device_caps)
{
	DEBUGFUNC("ixgbe_get_device_caps_generic");

	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);

	return IXGBE_SUCCESS;
}
3591
/**
 * ixgbe_calculate_checksum - Calculate checksum for buffer
 * @buffer: pointer to the bytes to sum (may be NULL, which yields 0)
 * @length: number of bytes to include in the checksum
 *
 * Computes the two's-complement checksum of the buffer: the value that,
 * added to the byte sum, makes the total wrap to zero modulo 256.
 **/
uint8_t ixgbe_calculate_checksum(uint8_t *buffer, uint32_t length)
{
	uint8_t total = 0;
	uint32_t idx;

	DEBUGFUNC("ixgbe_calculate_checksum");

	if (buffer == NULL)
		return 0;

	for (idx = 0; idx < length; idx++)
		total += buffer[idx];

	return (uint8_t)(0 - total);
}
3614
/**
 * ixgbe_hic_unlocked - Issue command to manageability block unlocked
 * @hw: pointer to the HW structure
 * @buffer: command to write and where the return status will be placed
 * @length: length of buffer, must be multiple of 4 bytes
 * @timeout: time in ms to wait for command completion
 *
 * Communicates with the manageability block.  On success return IXGBE_SUCCESS
 * else returns semaphore error when encountering an error acquiring
 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
 *
 * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
 * by the caller.
 **/
int32_t ixgbe_hic_unlocked(struct ixgbe_hw *hw, uint32_t *buffer, uint32_t length,
			   uint32_t timeout)
{
	uint32_t hicr, i, fwsts;
	uint16_t dword_len;

	DEBUGFUNC("ixgbe_hic_unlocked");

	/* reject empty or oversized commands up front */
	if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Set bit 9 of FWSTS clearing FW reset indication */
	fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
	IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);

	/* Check that the host interface is enabled. */
	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
	if (!(hicr & IXGBE_HICR_EN)) {
		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Calculate length in DWORDs. We must be DWORD aligned */
	if (length % sizeof(uint32_t)) {
		DEBUGOUT("Buffer length failure, not aligned to dword");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	dword_len = length >> 2;

	/* The device driver writes the relevant command block
	 * into the ram area.  The FW side is little-endian.
	 */
	for (i = 0; i < dword_len; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
				      i, htole32(buffer[i]));

	/* Setting this bit tells the ARC that a new command is pending. */
	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);

	/* poll for the ARC to clear the command bit, 1 ms per iteration;
	 * timeout == 0 means no polling at all */
	for (i = 0; i < timeout; i++) {
		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
		if (!(hicr & IXGBE_HICR_C))
			break;
		msec_delay(1);
	}

	/* Check command completion: fail on poll timeout, or when FW did
	 * not set the status-valid bit */
	if ((timeout && i == timeout) ||
	    !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
		ERROR_REPORT1(IXGBE_ERROR_CAUTION,
			     "Command has failed with no status valid.\n");
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	return IXGBE_SUCCESS;
}
3688
/**
 * ixgbe_host_interface_command - Issue command to manageability block
 * @hw: pointer to the HW structure
 * @buffer: contains the command to write and where the return status will
 *  be placed
 * @length: length of buffer, must be multiple of 4 bytes
 * @timeout: time in ms to wait for command completion
 * @return_data: read and return data from the buffer (TRUE) or not (FALSE)
 *  Needed because FW structures are big endian and decoding of
 *  these fields can be 8 bit or 16 bit based on command. Decoding
 *  is not easily understood without making a table of commands.
 *  So we will leave this up to the caller to read back the data
 *  in these cases.
 *
 * Communicates with the manageability block. On success return IXGBE_SUCCESS
 * else returns semaphore error when encountering an error acquiring
 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
 **/
int32_t ixgbe_host_interface_command(struct ixgbe_hw *hw, uint32_t *buffer,
				     uint32_t length, uint32_t timeout, bool return_data)
{
	uint32_t hdr_size = sizeof(struct ixgbe_hic_hdr);
	/* response header overlays the start of the caller's buffer */
	struct ixgbe_hic_hdr *resp = (struct ixgbe_hic_hdr *)buffer;
	uint16_t buf_len;
	int32_t status;
	uint32_t bi;
	uint32_t dword_len;

	DEBUGFUNC("ixgbe_host_interface_command");

	if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Take management host interface semaphore */
	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
	if (status)
		return status;

	status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
	if (status)
		goto rel_out;

	if (!return_data)
		goto rel_out;

	/* Calculate length in DWORDs */
	dword_len = hdr_size >> 2;

	/* first pull in the header so we know the buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		buffer[bi] = letoh32(IXGBE_READ_REG_ARRAY(hw,
							  IXGBE_FLEX_MNG, bi));
	}

	/*
	 * If there is any thing in data position pull it in
	 * Read Flash command requires reading buffer length from
	 * two bytes instead of one byte
	 */
	if (resp->cmd == 0x30) {
		/* pull in two extra dwords that carry the wide length */
		for (; bi < dword_len + 2; bi++) {
			buffer[bi] = letoh32(IXGBE_READ_REG_ARRAY(hw,
							IXGBE_FLEX_MNG, bi));
		}
		/* NOTE(review): high bits of the 12-bit length come from
		 * ret_status << 3 masked to 0xF00 -- matches upstream; the
		 * exact FW encoding should be confirmed against the HIC
		 * read-flash command spec. */
		buf_len = (((uint16_t)(resp->cmd_or_resp.ret_status) << 3)
				  & 0xF00) | resp->buf_len;
		hdr_size += (2 << 2);
	} else {
		buf_len = resp->buf_len;
	}
	if (!buf_len)
		goto rel_out;

	/* reply must fit in the caller's buffer */
	if (length < buf_len + hdr_size) {
		DEBUGOUT("Buffer not large enough for reply message.\n");
		status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto rel_out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/* Pull in the rest of the buffer (bi is where we left off).
	 * NOTE(review): "<=" reads one dword beyond dword_len; this matches
	 * the upstream driver -- confirm it is intentional and that callers
	 * size buffers accordingly. */
	for (; bi <= dword_len; bi++) {
		buffer[bi] = letoh32(IXGBE_READ_REG_ARRAY(hw,
							  IXGBE_FLEX_MNG, bi));
	}

rel_out:
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);

	return status;
}
3784
/**
 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
 * @hw: pointer to the hardware structure
 *
 * The 82599 and x540 MACs can experience issues if TX work is still pending
 * when a reset occurs.  This function prevents this by flushing the PCIe
 * buffers on the system.
 **/
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
{
	uint32_t gcr_ext, hlreg0, i, poll;
	uint16_t value;

	/*
	 * If double reset is not requested then all transactions should
	 * already be clear and as such there is no work to do
	 */
	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
		return;

	/*
	 * Set loopback enable to prevent any transmits from being sent
	 * should the link come up. This assumes that the RXCTRL.RXEN bit
	 * has already been cleared.
	 */
	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);

	/* Wait for a last completion before clearing buffers */
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(3);

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.  Poll the device-status word in config
	 * space, 100us per iteration, up to the PCIe completion timeout.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usec_delay(100);
		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
		/* device gone: config reads are garbage, stop polling */
		if (IXGBE_REMOVED(hw->hw_addr))
			goto out;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

out:
	/* initiate cleaning flow for buffers in the PCIe transaction layer */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);

	/* Flush all writes and allow 20usec for all transactions to clear */
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(20);

	/* restore previous register values */
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
}
3845
ixgbe_disable_rx_generic(struct ixgbe_hw * hw)3846 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
3847 {
3848 uint32_t pfdtxgswc;
3849 uint32_t rxctrl;
3850
3851 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3852 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3853 if (hw->mac.type != ixgbe_mac_82598EB) {
3854 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
3855 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
3856 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
3857 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
3858 hw->mac.set_lben = TRUE;
3859 } else {
3860 hw->mac.set_lben = FALSE;
3861 }
3862 }
3863 rxctrl &= ~IXGBE_RXCTRL_RXEN;
3864 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
3865 }
3866 }
3867
ixgbe_enable_rx_generic(struct ixgbe_hw * hw)3868 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
3869 {
3870 uint32_t pfdtxgswc;
3871 uint32_t rxctrl;
3872
3873 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3874 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
3875
3876 if (hw->mac.type != ixgbe_mac_82598EB) {
3877 if (hw->mac.set_lben) {
3878 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
3879 pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
3880 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
3881 hw->mac.set_lben = FALSE;
3882 }
3883 }
3884 }
3885
3886 /**
3887 * ixgbe_mng_present - returns TRUE when management capability is present
3888 * @hw: pointer to hardware structure
3889 */
ixgbe_mng_present(struct ixgbe_hw * hw)3890 bool ixgbe_mng_present(struct ixgbe_hw *hw)
3891 {
3892 uint32_t fwsm;
3893
3894 if (hw->mac.type < ixgbe_mac_82599EB)
3895 return FALSE;
3896
3897 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
3898
3899 return !!(fwsm & IXGBE_FWSM_FW_MODE_PT);
3900 }
3901
3902 /**
3903 * ixgbe_mng_enabled - Is the manageability engine enabled?
3904 * @hw: pointer to hardware structure
3905 *
3906 * Returns TRUE if the manageability engine is enabled.
3907 **/
ixgbe_mng_enabled(struct ixgbe_hw * hw)3908 bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
3909 {
3910 uint32_t fwsm, manc, factps;
3911
3912 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
3913 if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
3914 return FALSE;
3915
3916 manc = IXGBE_READ_REG(hw, IXGBE_MANC);
3917 if (!(manc & IXGBE_MANC_RCV_TCO_EN))
3918 return FALSE;
3919
3920 if (hw->mac.type <= ixgbe_mac_X540) {
3921 factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
3922 if (factps & IXGBE_FACTPS_MNGCG)
3923 return FALSE;
3924 }
3925
3926 return TRUE;
3927 }
3928
3929 /**
3930 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
3931 * @hw: pointer to hardware structure
3932 * @speed: new link speed
3933 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
3934 *
3935 * Set the link speed in the MAC and/or PHY register and restarts link.
3936 **/
int32_t ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
					      ixgbe_link_speed speed,
					      bool autoneg_wait_to_complete)
{
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	int32_t status = IXGBE_SUCCESS;
	uint32_t speedcnt = 0;		/* number of speeds attempted */
	uint32_t i = 0;
	bool autoneg, link_up = FALSE;

	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Mask off requested but non-supported speeds */
	status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg);
	if (status != IXGBE_SUCCESS)
		return status;

	speed &= link_speed;

	/* Try each speed one by one, highest priority first. We do this in
	 * software because 10Gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber_fixed:
		case ixgbe_media_type_fiber:
			/* set_rate_select_speed is optional on some MACs */
			if (hw->mac.ops.set_rate_select_speed)
				hw->mac.ops.set_rate_select_speed(hw,
					IXGBE_LINK_SPEED_10GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects MAC link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		if (!hw->mac.ops.setup_mac_link)
			return IXGBE_NOT_IMPLEMENTED;
		status = hw->mac.ops.setup_mac_link(hw,
						    IXGBE_LINK_SPEED_10GB_FULL,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the controller to acquire link. Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted. 82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed,
						  &link_up, FALSE);
			if (status != IXGBE_SUCCESS)
				return status;

			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		/* only record 1G as "highest" if 10G was not attempted */
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber_fixed:
		case ixgbe_media_type_fiber:
			if (hw->mac.ops.set_rate_select_speed)
				hw->mac.ops.set_rate_select_speed(hw,
					IXGBE_LINK_SPEED_1GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		if (!hw->mac.ops.setup_mac_link)
			return IXGBE_NOT_IMPLEMENTED;
		status = hw->mac.ops.setup_mac_link(hw,
						    IXGBE_LINK_SPEED_1GB_FULL,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if (link_up)
			goto out;
	}

	/* We didn't get link. Configure back to the highest speed we tried,
	 * (if there was more than one). We call ourselves back with just the
	 * single highest speed that the user requested.
	 * NOTE: this recursion is bounded — the recursive call passes a
	 * single speed, so speedcnt <= 1 there and it cannot recurse again.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
						       highest_link_speed,
						       autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}
4081
4082 /**
4083 * ixgbe_set_soft_rate_select_speed - Set module link speed
4084 * @hw: pointer to hardware structure
4085 * @speed: link speed to set
4086 *
4087 * Set module link speed via the soft rate select.
4088 */
void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed)
{
	int32_t status;
	uint8_t rs, eeprom_data;

	/* Translate the requested link speed into the SFF rate-select bit */
	switch (speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
		break;
	default:
		DEBUGOUT("Invalid fixed module speed\n");
		return;
	}

	/* Set RS0: read-modify-write the rate select bits */
	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
					   IXGBE_I2C_EEPROM_DEV_ADDR2,
					   &eeprom_data);
	if (status) {
		DEBUGOUT("Failed to read Rx Rate Select RS0\n");
		return;
	}

	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;

	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
					    IXGBE_I2C_EEPROM_DEV_ADDR2,
					    eeprom_data);
	if (status) {
		DEBUGOUT("Failed to write Rx Rate Select RS0\n");
		return;
	}

	/* Set RS1 the same way */
	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
					   IXGBE_I2C_EEPROM_DEV_ADDR2,
					   &eeprom_data);
	if (status) {
		DEBUGOUT("Failed to read Rx Rate Select RS1\n");
		return;
	}

	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;

	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
					    IXGBE_I2C_EEPROM_DEV_ADDR2,
					    eeprom_data);
	if (status)
		DEBUGOUT("Failed to write Rx Rate Select RS1\n");
}
4148
4149 /* MAC Operations */
4150
4151 /**
4152 * ixgbe_init_shared_code - Initialize the shared code
4153 * @hw: pointer to hardware structure
4154 *
4155 * This will assign function pointers and assign the MAC type and PHY code.
4156 * Does not touch the hardware. This function must be called prior to any
4157 * other function in the shared code. The ixgbe_hw structure should be
4158 * memset to 0 prior to calling this function. The following fields in
4159 * hw structure should be filled in prior to calling this function:
4160 * hw_addr, back, device_id, vendor_id, subsystem_device_id,
4161 * subsystem_vendor_id, and revision_id
4162 **/
ixgbe_init_shared_code(struct ixgbe_hw * hw)4163 int32_t ixgbe_init_shared_code(struct ixgbe_hw *hw)
4164 {
4165 int32_t status;
4166
4167 DEBUGFUNC("ixgbe_init_shared_code");
4168
4169 /*
4170 * Set the mac type
4171 */
4172 ixgbe_set_mac_type(hw);
4173
4174 switch (hw->mac.type) {
4175 case ixgbe_mac_82598EB:
4176 status = ixgbe_init_ops_82598(hw);
4177 break;
4178 case ixgbe_mac_82599EB:
4179 status = ixgbe_init_ops_82599(hw);
4180 break;
4181 case ixgbe_mac_X540:
4182 status = ixgbe_init_ops_X540(hw);
4183 break;
4184 case ixgbe_mac_X550:
4185 status = ixgbe_init_ops_X550(hw);
4186 break;
4187 case ixgbe_mac_X550EM_x:
4188 status = ixgbe_init_ops_X550EM_x(hw);
4189 break;
4190 case ixgbe_mac_X550EM_a:
4191 status = ixgbe_init_ops_X550EM_a(hw);
4192 break;
4193 default:
4194 status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
4195 break;
4196 }
4197 hw->mac.max_link_up_time = IXGBE_LINK_UP_TIME;
4198
4199 return status;
4200 }
4201
4202 /**
4203 * ixgbe_set_mac_type - Sets MAC type
4204 * @hw: pointer to the HW structure
4205 *
4206 * This function sets the mac type of the adapter based on the
4207 * vendor ID and device ID stored in the hw structure.
4208 **/
ixgbe_set_mac_type(struct ixgbe_hw * hw)4209 int32_t ixgbe_set_mac_type(struct ixgbe_hw *hw)
4210 {
4211 int32_t ret_val = IXGBE_SUCCESS;
4212
4213 DEBUGFUNC("ixgbe_set_mac_type\n");
4214
4215 if (hw->vendor_id != IXGBE_INTEL_VENDOR_ID) {
4216 ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
4217 "Unsupported vendor id: %x", hw->vendor_id);
4218 return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
4219 }
4220
4221 hw->mvals = ixgbe_mvals_base;
4222
4223 switch (hw->device_id) {
4224 case IXGBE_DEV_ID_82598:
4225 case IXGBE_DEV_ID_82598_BX:
4226 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
4227 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
4228 case IXGBE_DEV_ID_82598AT:
4229 case IXGBE_DEV_ID_82598AT2:
4230 case IXGBE_DEV_ID_82598AT_DUAL_PORT:
4231 case IXGBE_DEV_ID_82598EB_CX4:
4232 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
4233 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
4234 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
4235 case IXGBE_DEV_ID_82598EB_XF_LR:
4236 case IXGBE_DEV_ID_82598EB_SFP_LOM:
4237 hw->mac.type = ixgbe_mac_82598EB;
4238 break;
4239 case IXGBE_DEV_ID_82599_KX4:
4240 case IXGBE_DEV_ID_82599_KX4_MEZZ:
4241 case IXGBE_DEV_ID_82599_XAUI_LOM:
4242 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
4243 case IXGBE_DEV_ID_82599_KR:
4244 case IXGBE_DEV_ID_82599_SFP:
4245 case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
4246 case IXGBE_DEV_ID_82599_SFP_FCOE:
4247 case IXGBE_DEV_ID_82599_SFP_EM:
4248 case IXGBE_DEV_ID_82599_SFP_SF2:
4249 case IXGBE_DEV_ID_82599_SFP_SF_QP:
4250 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
4251 case IXGBE_DEV_ID_82599EN_SFP:
4252 case IXGBE_DEV_ID_82599_CX4:
4253 case IXGBE_DEV_ID_82599_BYPASS:
4254 case IXGBE_DEV_ID_82599_T3_LOM:
4255 hw->mac.type = ixgbe_mac_82599EB;
4256 break;
4257 case IXGBE_DEV_ID_X540T:
4258 case IXGBE_DEV_ID_X540T1:
4259 case IXGBE_DEV_ID_X540_BYPASS:
4260 hw->mac.type = ixgbe_mac_X540;
4261 hw->mvals = ixgbe_mvals_X540;
4262 break;
4263 case IXGBE_DEV_ID_X550T:
4264 case IXGBE_DEV_ID_X550T1:
4265 hw->mac.type = ixgbe_mac_X550;
4266 hw->mvals = ixgbe_mvals_X550;
4267 break;
4268 case IXGBE_DEV_ID_X550EM_X_KX4:
4269 case IXGBE_DEV_ID_X550EM_X_KR:
4270 case IXGBE_DEV_ID_X550EM_X_10G_T:
4271 case IXGBE_DEV_ID_X550EM_X_1G_T:
4272 case IXGBE_DEV_ID_X550EM_X_SFP:
4273 case IXGBE_DEV_ID_X550EM_X_XFI:
4274 hw->mac.type = ixgbe_mac_X550EM_x;
4275 hw->mvals = ixgbe_mvals_X550EM_x;
4276 break;
4277 case IXGBE_DEV_ID_X550EM_A_KR:
4278 case IXGBE_DEV_ID_X550EM_A_KR_L:
4279 case IXGBE_DEV_ID_X550EM_A_SFP_N:
4280 case IXGBE_DEV_ID_X550EM_A_SGMII:
4281 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
4282 case IXGBE_DEV_ID_X550EM_A_1G_T:
4283 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
4284 case IXGBE_DEV_ID_X550EM_A_10G_T:
4285 case IXGBE_DEV_ID_X550EM_A_QSFP:
4286 case IXGBE_DEV_ID_X550EM_A_QSFP_N:
4287 case IXGBE_DEV_ID_X550EM_A_SFP:
4288 hw->mac.type = ixgbe_mac_X550EM_a;
4289 hw->mvals = ixgbe_mvals_X550EM_a;
4290 break;
4291 default:
4292 ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
4293 ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
4294 "Unsupported device id: %x",
4295 hw->device_id);
4296 break;
4297 }
4298
4299 DEBUGOUT2("ixgbe_set_mac_type found mac: %d, returns: %d\n",
4300 hw->mac.type, ret_val);
4301 return ret_val;
4302 }
4303
4304 /**
4305 * ixgbe_init_hw - Initialize the hardware
4306 * @hw: pointer to hardware structure
4307 *
4308 * Initialize the hardware by resetting and then starting the hardware
4309 **/
ixgbe_init_hw(struct ixgbe_hw * hw)4310 int32_t ixgbe_init_hw(struct ixgbe_hw *hw)
4311 {
4312 if (hw->mac.ops.init_hw)
4313 return hw->mac.ops.init_hw(hw);
4314 else
4315 return IXGBE_NOT_IMPLEMENTED;
4316 }
4317
4318 /**
4319 * ixgbe_get_media_type - Get media type
4320 * @hw: pointer to hardware structure
4321 *
4322 * Returns the media type (fiber, copper, backplane)
4323 **/
ixgbe_get_media_type(struct ixgbe_hw * hw)4324 enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw)
4325 {
4326 if (hw->mac.ops.get_media_type)
4327 return hw->mac.ops.get_media_type(hw);
4328 else
4329 return ixgbe_media_type_unknown;
4330 }
4331
4332 /**
4333 * ixgbe_identify_phy - Get PHY type
4334 * @hw: pointer to hardware structure
4335 *
4336 * Determines the physical layer module found on the current adapter.
4337 **/
ixgbe_identify_phy(struct ixgbe_hw * hw)4338 int32_t ixgbe_identify_phy(struct ixgbe_hw *hw)
4339 {
4340 int32_t status = IXGBE_SUCCESS;
4341
4342 if (hw->phy.type == ixgbe_phy_unknown) {
4343 if (hw->phy.ops.identify)
4344 status = hw->phy.ops.identify(hw);
4345 else
4346 status = IXGBE_NOT_IMPLEMENTED;
4347 }
4348
4349 return status;
4350 }
4351
4352 /**
4353 * ixgbe_check_link - Get link and speed status
4354 * @hw: pointer to hardware structure
4355 * @speed: pointer to link speed
4356 * @link_up: TRUE when link is up
4357 * @link_up_wait_to_complete: bool used to wait for link up or not
4358 *
4359 * Reads the links register to determine if link is up and the current speed
4360 **/
ixgbe_check_link(struct ixgbe_hw * hw,ixgbe_link_speed * speed,bool * link_up,bool link_up_wait_to_complete)4361 int32_t ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4362 bool *link_up, bool link_up_wait_to_complete)
4363 {
4364 if (hw->mac.ops.check_link)
4365 return hw->mac.ops.check_link(hw, speed, link_up,
4366 link_up_wait_to_complete);
4367 else
4368 return IXGBE_NOT_IMPLEMENTED;
4369 }
4370
4371 /**
4372 * ixgbe_flap_tx_laser - flap Tx laser to start autotry process
4373 * @hw: pointer to hardware structure
4374 *
4375 * When the driver changes the link speeds that it can support then
4376 * flap the tx laser to alert the link partner to start autotry
4377 * process on its end.
4378 **/
ixgbe_flap_tx_laser(struct ixgbe_hw * hw)4379 void ixgbe_flap_tx_laser(struct ixgbe_hw *hw)
4380 {
4381 if (hw->mac.ops.flap_tx_laser)
4382 hw->mac.ops.flap_tx_laser(hw);
4383 }
4384
4385 /**
4386 * ixgbe_set_rar - Set Rx address register
4387 * @hw: pointer to hardware structure
4388 * @index: Receive address register to write
4389 * @addr: Address to put into receive address register
4390 * @vmdq: VMDq "set"
4391 * @enable_addr: set flag that address is active
4392 *
4393 * Puts an ethernet address into a receive address register.
4394 **/
ixgbe_set_rar(struct ixgbe_hw * hw,uint32_t index,uint8_t * addr,uint32_t vmdq,uint32_t enable_addr)4395 int32_t ixgbe_set_rar(struct ixgbe_hw *hw, uint32_t index, uint8_t *addr,
4396 uint32_t vmdq, uint32_t enable_addr)
4397 {
4398 if (hw->mac.ops.set_rar)
4399 return hw->mac.ops.set_rar(hw, index, addr, vmdq, enable_addr);
4400 else
4401 return IXGBE_NOT_IMPLEMENTED;
4402 }
4403
4404 /**
4405 * ixgbe_set_vmdq - Associate a VMDq index with a receive address
4406 * @hw: pointer to hardware structure
4407 * @rar: receive address register index to associate with VMDq index
4408 * @vmdq: VMDq set or pool index
4409 **/
ixgbe_set_vmdq(struct ixgbe_hw * hw,uint32_t rar,uint32_t vmdq)4410 int32_t ixgbe_set_vmdq(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
4411 {
4412 if (hw->mac.ops.set_vmdq)
4413 return hw->mac.ops.set_vmdq(hw, rar, vmdq);
4414 else
4415 return IXGBE_NOT_IMPLEMENTED;
4416 }
4417
4418 /**
4419 * ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address
4420 * @hw: pointer to hardware structure
4421 * @rar: receive address register index to disassociate with VMDq index
4422 * @vmdq: VMDq set or pool index
4423 **/
ixgbe_clear_vmdq(struct ixgbe_hw * hw,uint32_t rar,uint32_t vmdq)4424 int32_t ixgbe_clear_vmdq(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
4425 {
4426 if (hw->mac.ops.clear_vmdq)
4427 return hw->mac.ops.clear_vmdq(hw, rar, vmdq);
4428 else
4429 return IXGBE_NOT_IMPLEMENTED;
4430 }
4431
4432 /**
4433 * ixgbe_init_uta_tables - Initializes Unicast Table Arrays.
4434 * @hw: pointer to hardware structure
4435 *
4436 * Initializes the Unicast Table Arrays to zero on device load. This
4437 * is part of the Rx init addr execution path.
4438 **/
ixgbe_init_uta_tables(struct ixgbe_hw * hw)4439 int32_t ixgbe_init_uta_tables(struct ixgbe_hw *hw)
4440 {
4441 if (hw->mac.ops.init_uta_tables)
4442 return hw->mac.ops.init_uta_tables(hw);
4443 else
4444 return IXGBE_NOT_IMPLEMENTED;
4445 }
4446
ixgbe_disable_rx(struct ixgbe_hw * hw)4447 void ixgbe_disable_rx(struct ixgbe_hw *hw)
4448 {
4449 if (hw->mac.ops.disable_rx)
4450 hw->mac.ops.disable_rx(hw);
4451 }
4452
ixgbe_enable_rx(struct ixgbe_hw * hw)4453 void ixgbe_enable_rx(struct ixgbe_hw *hw)
4454 {
4455 if (hw->mac.ops.enable_rx)
4456 hw->mac.ops.enable_rx(hw);
4457 }
4458
4459 /*
4460 * MBX: Mailbox handling
4461 */
4462
4463 /**
4464 * ixgbe_read_mbx - Reads a message from the mailbox
4465 * @hw: pointer to the HW structure
4466 * @msg: The message buffer
4467 * @size: Length of buffer
4468 * @mbx_id: id of mailbox to read
4469 *
4470 * returns SUCCESS if it successfully read message from buffer
4471 **/
ixgbe_read_mbx(struct ixgbe_hw * hw,uint32_t * msg,uint16_t size,uint16_t mbx_id)4472 int32_t ixgbe_read_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16_t mbx_id)
4473 {
4474 struct ixgbe_mbx_info *mbx = &hw->mbx;
4475
4476 DEBUGFUNC("ixgbe_read_mbx");
4477
4478 /* limit read to size of mailbox */
4479 if (size > mbx->size)
4480 size = mbx->size;
4481
4482 if (mbx->ops.read)
4483 return mbx->ops.read(hw, msg, size, mbx_id);
4484
4485 return IXGBE_ERR_CONFIG;
4486 }
4487
4488 /**
4489 * ixgbe_poll_mbx - Wait for message and read it from the mailbox
4490 * @hw: pointer to the HW structure
4491 * @msg: The message buffer
4492 * @size: Length of buffer
4493 * @mbx_id: id of mailbox to read
4494 *
4495 * returns SUCCESS if it successfully read message from buffer
4496 **/
ixgbe_poll_mbx(struct ixgbe_hw * hw,uint32_t * msg,uint16_t size,uint16_t mbx_id)4497 int32_t ixgbe_poll_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
4498 uint16_t mbx_id)
4499 {
4500 struct ixgbe_mbx_info *mbx = &hw->mbx;
4501 int32_t ret_val;
4502
4503 DEBUGFUNC("ixgbe_poll_mbx");
4504
4505 if (!mbx->ops.read || !mbx->ops.check_for_msg ||
4506 !mbx->timeout)
4507 return IXGBE_ERR_CONFIG;
4508
4509 /* limit read to size of mailbox */
4510 if (size > mbx->size)
4511 size = mbx->size;
4512
4513 ret_val = ixgbe_poll_for_msg(hw, mbx_id);
4514 /* if ack received read message, otherwise we timed out */
4515 if (!ret_val)
4516 return mbx->ops.read(hw, msg, size, mbx_id);
4517
4518 return ret_val;
4519 }
4520
4521 /**
4522 * ixgbe_write_mbx - Write a message to the mailbox
4523 * @hw: pointer to the HW structure
4524 * @msg: The message buffer
4525 * @size: Length of buffer
4526 * @mbx_id: id of mailbox to write
4527 *
4528 * returns SUCCESS if it successfully copied message into the buffer
4529 **/
ixgbe_write_mbx(struct ixgbe_hw * hw,uint32_t * msg,uint16_t size,uint16_t mbx_id)4530 int32_t ixgbe_write_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16_t mbx_id)
4531 {
4532 struct ixgbe_mbx_info *mbx = &hw->mbx;
4533 int32_t ret_val = IXGBE_ERR_MBX;
4534
4535 DEBUGFUNC("ixgbe_write_mbx");
4536
4537 /*
4538 * exit if either we can't write, release
4539 * or there is no timeout defined
4540 */
4541 if (!mbx->ops.write || !mbx->ops.check_for_ack ||
4542 !mbx->ops.release || !mbx->timeout)
4543 return IXGBE_ERR_CONFIG;
4544
4545 if (size > mbx->size) {
4546 ret_val = IXGBE_ERR_PARAM;
4547 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
4548 "Invalid mailbox message size %u", size);
4549 } else {
4550 ret_val = mbx->ops.write(hw, msg, size, mbx_id);
4551 }
4552
4553 return ret_val;
4554 }
4555
4556 /**
4557 * ixgbe_check_for_msg - checks to see if someone sent us mail
4558 * @hw: pointer to the HW structure
4559 * @mbx_id: id of mailbox to check
4560 *
4561 * returns SUCCESS if the Status bit was found or else ERR_MBX
4562 **/
ixgbe_check_for_msg(struct ixgbe_hw * hw,uint16_t mbx_id)4563 int32_t ixgbe_check_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id)
4564 {
4565 struct ixgbe_mbx_info *mbx = &hw->mbx;
4566 int32_t ret_val = IXGBE_ERR_MBX;
4567
4568 DEBUGFUNC("ixgbe_check_for_msg");
4569
4570 if (mbx->ops.check_for_msg)
4571 ret_val = mbx->ops.check_for_msg(hw, mbx_id);
4572
4573 return ret_val;
4574 }
4575
4576 /**
4577 * ixgbe_check_for_ack - checks to see if someone sent us ACK
4578 * @hw: pointer to the HW structure
4579 * @mbx_id: id of mailbox to check
4580 *
4581 * returns SUCCESS if the Status bit was found or else ERR_MBX
4582 **/
ixgbe_check_for_ack(struct ixgbe_hw * hw,uint16_t mbx_id)4583 int32_t ixgbe_check_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id)
4584 {
4585 struct ixgbe_mbx_info *mbx = &hw->mbx;
4586 int32_t ret_val = IXGBE_ERR_MBX;
4587
4588 DEBUGFUNC("ixgbe_check_for_ack");
4589
4590 if (mbx->ops.check_for_ack)
4591 ret_val = mbx->ops.check_for_ack(hw, mbx_id);
4592
4593 return ret_val;
4594 }
4595
4596 /**
4597 * ixgbe_check_for_rst - checks to see if other side has reset
4598 * @hw: pointer to the HW structure
4599 * @mbx_id: id of mailbox to check
4600 *
4601 * returns SUCCESS if the Status bit was found or else ERR_MBX
4602 **/
ixgbe_check_for_rst(struct ixgbe_hw * hw,uint16_t mbx_id)4603 int32_t ixgbe_check_for_rst(struct ixgbe_hw *hw, uint16_t mbx_id)
4604 {
4605 struct ixgbe_mbx_info *mbx = &hw->mbx;
4606 int32_t ret_val = IXGBE_ERR_MBX;
4607
4608 DEBUGFUNC("ixgbe_check_for_rst");
4609
4610 if (mbx->ops.check_for_rst)
4611 ret_val = mbx->ops.check_for_rst(hw, mbx_id);
4612
4613 return ret_val;
4614 }
4615
4616 /**
4617 * ixgbe_poll_for_msg - Wait for message notification
4618 * @hw: pointer to the HW structure
4619 * @mbx_id: id of mailbox to write
4620 *
4621 * returns SUCCESS if it successfully received a message notification
4622 **/
int32_t ixgbe_poll_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	/* countdown is a number of poll attempts, each mbx->usec_delay apart */
	int countdown = mbx->timeout;

	DEBUGFUNC("ixgbe_poll_for_msg");

	/* cannot poll without a check hook or a non-zero timeout budget */
	if (!countdown || !mbx->ops.check_for_msg)
		return IXGBE_ERR_CONFIG;

	/* spin until check_for_msg() returns success (0) or budget runs out */
	while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
		countdown--;
		if (!countdown)
			break;	/* exhausted: skip the final delay */
		usec_delay(mbx->usec_delay);
	}

	if (countdown == 0) {
		ERROR_REPORT2(IXGBE_ERROR_POLLING,
			   "Polling for VF%u mailbox message timedout", mbx_id);
		return IXGBE_ERR_TIMEOUT;
	}

	return IXGBE_SUCCESS;
}
4648
4649 /**
4650 * ixgbe_poll_for_ack - Wait for message acknowledgement
4651 * @hw: pointer to the HW structure
4652 * @mbx_id: id of mailbox to write
4653 *
4654 * returns SUCCESS if it successfully received a message acknowledgement
4655 **/
int32_t ixgbe_poll_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	/* countdown is a number of poll attempts, each mbx->usec_delay apart */
	int countdown = mbx->timeout;

	DEBUGFUNC("ixgbe_poll_for_ack");

	/* cannot poll without a check hook or a non-zero timeout budget */
	if (!countdown || !mbx->ops.check_for_ack)
		return IXGBE_ERR_CONFIG;

	/* spin until check_for_ack() returns success (0) or budget runs out */
	while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
		countdown--;
		if (!countdown)
			break;	/* exhausted: skip the final delay */
		usec_delay(mbx->usec_delay);
	}

	if (countdown == 0) {
		ERROR_REPORT2(IXGBE_ERROR_POLLING,
			     "Polling for VF%u mailbox ack timedout", mbx_id);
		return IXGBE_ERR_TIMEOUT;
	}

	return IXGBE_SUCCESS;
}
4681
4682 /**
4683 * ixgbe_read_mailbox_vf - read VF's mailbox register
4684 * @hw: pointer to the HW structure
4685 *
4686 * This function is used to read the mailbox register dedicated for VF without
4687 * losing the read to clear status bits.
4688 **/
ixgbe_read_mailbox_vf(struct ixgbe_hw * hw)4689 static uint32_t ixgbe_read_mailbox_vf(struct ixgbe_hw *hw)
4690 {
4691 uint32_t vf_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
4692
4693 vf_mailbox |= hw->mbx.vf_mailbox;
4694 hw->mbx.vf_mailbox |= vf_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
4695
4696 return vf_mailbox;
4697 }
4698
ixgbe_clear_msg_vf(struct ixgbe_hw * hw)4699 static void ixgbe_clear_msg_vf(struct ixgbe_hw *hw)
4700 {
4701 uint32_t vf_mailbox = ixgbe_read_mailbox_vf(hw);
4702
4703 if (vf_mailbox & IXGBE_VFMAILBOX_PFSTS) {
4704 hw->mbx.stats.reqs++;
4705 hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFSTS;
4706 }
4707 }
4708
ixgbe_clear_ack_vf(struct ixgbe_hw * hw)4709 static void ixgbe_clear_ack_vf(struct ixgbe_hw *hw)
4710 {
4711 uint32_t vf_mailbox = ixgbe_read_mailbox_vf(hw);
4712
4713 if (vf_mailbox & IXGBE_VFMAILBOX_PFACK) {
4714 hw->mbx.stats.acks++;
4715 hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFACK;
4716 }
4717 }
4718
ixgbe_clear_rst_vf(struct ixgbe_hw * hw)4719 static void ixgbe_clear_rst_vf(struct ixgbe_hw *hw)
4720 {
4721 uint32_t vf_mailbox = ixgbe_read_mailbox_vf(hw);
4722
4723 if (vf_mailbox & (IXGBE_VFMAILBOX_RSTI | IXGBE_VFMAILBOX_RSTD)) {
4724 hw->mbx.stats.rsts++;
4725 hw->mbx.vf_mailbox &= ~(IXGBE_VFMAILBOX_RSTI |
4726 IXGBE_VFMAILBOX_RSTD);
4727 }
4728 }
4729
4730 /**
4731 * ixgbe_check_for_bit_vf - Determine if a status bit was set
4732 * @hw: pointer to the HW structure
4733 * @mask: bitmask for bits to be tested and cleared
4734 *
4735 * This function is used to check for the read to clear bits within
4736 * the V2P mailbox.
4737 **/
ixgbe_check_for_bit_vf(struct ixgbe_hw * hw,uint32_t mask)4738 static int32_t ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, uint32_t mask)
4739 {
4740 uint32_t vf_mailbox = ixgbe_read_mailbox_vf(hw);
4741
4742 if (vf_mailbox & mask)
4743 return IXGBE_SUCCESS;
4744
4745 return IXGBE_ERR_MBX;
4746 }
4747
4748 /**
4749 * ixgbe_check_for_msg_vf - checks to see if the PF has sent mail
4750 * @hw: pointer to the HW structure
4751 * @mbx_id: id of mailbox to check
4752 *
4753 * returns SUCCESS if the PF has set the Status bit or else ERR_MBX
4754 **/
ixgbe_check_for_msg_vf(struct ixgbe_hw * hw,uint16_t mbx_id)4755 static int32_t ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, uint16_t mbx_id)
4756 {
4757 DEBUGFUNC("ixgbe_check_for_msg_vf");
4758
4759 if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS))
4760 return IXGBE_SUCCESS;
4761
4762 return IXGBE_ERR_MBX;
4763 }
4764
4765 /**
4766 * ixgbe_check_for_ack_vf - checks to see if the PF has ACK'd
4767 * @hw: pointer to the HW structure
4768 * @mbx_id: id of mailbox to check
4769 *
4770 * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX
4771 **/
ixgbe_check_for_ack_vf(struct ixgbe_hw * hw,uint16_t mbx_id)4772 static int32_t ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, uint16_t mbx_id)
4773 {
4774 DEBUGFUNC("ixgbe_check_for_ack_vf");
4775
4776 if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
4777 /* TODO: should this be autocleared? */
4778 ixgbe_clear_ack_vf(hw);
4779 return IXGBE_SUCCESS;
4780 }
4781
4782 return IXGBE_ERR_MBX;
4783 }
4784
4785 /**
4786 * ixgbe_check_for_rst_vf - checks to see if the PF has reset
4787 * @hw: pointer to the HW structure
4788 * @mbx_id: id of mailbox to check
4789 *
4790 * returns TRUE if the PF has set the reset done bit or else FALSE
4791 **/
ixgbe_check_for_rst_vf(struct ixgbe_hw * hw,uint16_t mbx_id)4792 static int32_t ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, uint16_t mbx_id)
4793 {
4794 DEBUGFUNC("ixgbe_check_for_rst_vf");
4795
4796 if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_RSTI |
4797 IXGBE_VFMAILBOX_RSTD)) {
4798 /* TODO: should this be autocleared? */
4799 ixgbe_clear_rst_vf(hw);
4800 return IXGBE_SUCCESS;
4801 }
4802
4803 return IXGBE_ERR_MBX;
4804 }
4805
4806 /**
4807 * ixgbe_obtain_mbx_lock_vf - obtain mailbox lock
4808 * @hw: pointer to the HW structure
4809 *
4810 * return SUCCESS if we obtained the mailbox lock
4811 **/
static int32_t ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	/* countdown is a number of attempts, each mbx->usec_delay apart */
	int countdown = mbx->timeout;
	int32_t ret_val = IXGBE_ERR_MBX;
	uint32_t vf_mailbox;

	DEBUGFUNC("ixgbe_obtain_mbx_lock_vf");

	/* a zero timeout means locking is not configured */
	if (!mbx->timeout)
		return IXGBE_ERR_CONFIG;

	while (countdown--) {
		/* Reserve mailbox for VF use */
		vf_mailbox = ixgbe_read_mailbox_vf(hw);
		vf_mailbox |= IXGBE_VFMAILBOX_VFU;
		IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);

		/* Verify that VF is the owner of the lock: the write may
		 * lose the race to the PF, so read back to confirm.
		 */
		if (ixgbe_read_mailbox_vf(hw) & IXGBE_VFMAILBOX_VFU) {
			ret_val = IXGBE_SUCCESS;
			break;
		}

		/* Wait a bit before trying again */
		usec_delay(mbx->usec_delay);
	}

	if (ret_val != IXGBE_SUCCESS) {
		ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
			      "Failed to obtain mailbox lock");
		ret_val = IXGBE_ERR_TIMEOUT;
	}

	return ret_val;
}
4848
4849 /**
4850 * ixgbe_read_posted_mbx - Wait for message notification and receive message
4851 * @hw: pointer to the HW structure
4852 * @msg: The message buffer
4853 * @size: Length of buffer
4854 * @mbx_id: id of mailbox to write
4855 *
4856 * returns SUCCESS if it successfully received a message notification and
4857 * copied it into the receive buffer.
4858 **/
ixgbe_read_posted_mbx(struct ixgbe_hw * hw,uint32_t * msg,uint16_t size,uint16_t mbx_id)4859 int32_t ixgbe_read_posted_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16_t mbx_id)
4860 {
4861 struct ixgbe_mbx_info *mbx = &hw->mbx;
4862 int32_t ret_val = IXGBE_ERR_MBX;
4863
4864 DEBUGFUNC("ixgbe_read_posted_mbx");
4865
4866 if (!mbx->ops.read)
4867 goto out;
4868
4869 ret_val = ixgbe_poll_for_msg(hw, mbx_id);
4870
4871 /* if ack received read message, otherwise we timed out */
4872 if (!ret_val)
4873 ret_val = mbx->ops.read(hw, msg, size, mbx_id);
4874 out:
4875 return ret_val;
4876 }
4877
4878 /**
4879 * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
4880 * @hw: pointer to the HW structure
4881 * @msg: The message buffer
4882 * @size: Length of buffer
4883 * @mbx_id: id of mailbox to write
4884 *
4885 * returns SUCCESS if it successfully copied message into the buffer and
4886 * received an ack to that message within delay * timeout period
4887 **/
ixgbe_write_posted_mbx(struct ixgbe_hw * hw,uint32_t * msg,uint16_t size,uint16_t mbx_id)4888 int32_t ixgbe_write_posted_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
4889 uint16_t mbx_id)
4890 {
4891 struct ixgbe_mbx_info *mbx = &hw->mbx;
4892 int32_t ret_val = IXGBE_ERR_MBX;
4893
4894 DEBUGFUNC("ixgbe_write_posted_mbx");
4895
4896 /* exit if either we can't write or there isn't a defined timeout */
4897 if (!mbx->ops.write || !mbx->timeout)
4898 goto out;
4899
4900 /* send msg */
4901 ret_val = mbx->ops.write(hw, msg, size, mbx_id);
4902
4903 /* if msg sent wait until we receive an ack */
4904 if (!ret_val)
4905 ret_val = ixgbe_poll_for_ack(hw, mbx_id);
4906 out:
4907 return ret_val;
4908 }
4909
4910 /**
4911 * ixgbe_init_mbx_ops_generic - Initialize MB function pointers
4912 * @hw: pointer to the HW structure
4913 *
4914 * Setups up the mailbox read and write message function pointers
4915 **/
ixgbe_init_mbx_ops_generic(struct ixgbe_hw * hw)4916 void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
4917 {
4918 struct ixgbe_mbx_info *mbx = &hw->mbx;
4919
4920 mbx->ops.read_posted = ixgbe_read_posted_mbx;
4921 mbx->ops.write_posted = ixgbe_write_posted_mbx;
4922 }
4923
/**
 * ixgbe_check_for_bit_pf - test bits in a PFMBICR register
 * @hw: pointer to the HW structure
 * @mask: bits to test
 * @index: PFMBICR register index
 *
 * returns SUCCESS if any bit in @mask is set in PFMBICR[index], else ERR_MBX
 **/
int32_t
ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, uint32_t mask, int32_t index)
{
	uint32_t reg = IXGBE_READ_REG(hw, IXGBE_PFMBICR(index));

	return (reg & mask) ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
}
4933
4934 /**
4935 * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
4936 * @hw: pointer to the HW structure
4937 * @vf_number: the VF index
4938 *
4939 * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
4940 **/
ixgbe_check_for_msg_pf(struct ixgbe_hw * hw,uint16_t vf_id)4941 int32_t ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, uint16_t vf_id)
4942 {
4943 uint32_t vf_shift = IXGBE_PFMBICR_SHIFT(vf_id);
4944 int32_t index = IXGBE_PFMBICR_INDEX(vf_id);
4945 DEBUGFUNC("ixgbe_check_for_msg_pf");
4946
4947 if (!ixgbe_check_for_bit_pf(hw, IXGBE_PFMBICR_VFREQ_VF1 << vf_shift,
4948 index))
4949 return IXGBE_SUCCESS;
4950
4951 return IXGBE_ERR_MBX;
4952 }
4953
/**
 * ixgbe_clear_msg_pf - clear the VF request bit for one VF
 * @hw: pointer to the HW structure
 * @vf_id: the VF index
 *
 * Bumps the request counter if the VF's REQ bit is currently set, then
 * writes the bit back to PFMBICR to clear it (presumably
 * write-one-to-clear semantics — matches how the ack bit is handled).
 **/
static void
ixgbe_clear_msg_pf(struct ixgbe_hw *hw, uint16_t vf_id)
{
	uint32_t reqbit = IXGBE_PFMBICR_VFREQ_VF1 << IXGBE_PFMBICR_SHIFT(vf_id);
	int32_t index = IXGBE_PFMBICR_INDEX(vf_id);

	if (IXGBE_READ_REG(hw, IXGBE_PFMBICR(index)) & reqbit)
		hw->mbx.stats.reqs++;

	IXGBE_WRITE_REG(hw, IXGBE_PFMBICR(index), reqbit);
}
4968
/**
 * ixgbe_clear_ack_pf - clear the VF ack bit for one VF
 * @hw: pointer to the HW structure
 * @vf_id: the VF index
 *
 * Bumps the ack counter if the VF's ACK bit is currently set, then
 * writes the bit back to PFMBICR to clear it.
 **/
static void
ixgbe_clear_ack_pf(struct ixgbe_hw *hw, uint16_t vf_id)
{
	uint32_t ackbit = IXGBE_PFMBICR_VFACK_VF1 << IXGBE_PFMBICR_SHIFT(vf_id);
	int32_t index = IXGBE_PFMBICR_INDEX(vf_id);

	if (IXGBE_READ_REG(hw, IXGBE_PFMBICR(index)) & ackbit)
		hw->mbx.stats.acks++;

	IXGBE_WRITE_REG(hw, IXGBE_PFMBICR(index), ackbit);
}
4983
4984 /**
4985 * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
4986 * @hw: pointer to the HW structure
4987 * @vf_number: the VF index
4988 *
4989 * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
4990 **/
ixgbe_check_for_ack_pf(struct ixgbe_hw * hw,uint16_t vf_id)4991 int32_t ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, uint16_t vf_id)
4992 {
4993 uint32_t vf_shift = IXGBE_PFMBICR_SHIFT(vf_id);
4994 int32_t index = IXGBE_PFMBICR_INDEX(vf_id);
4995 int32_t ret_val = IXGBE_ERR_MBX;
4996
4997 DEBUGFUNC("ixgbe_check_for_ack_pf");
4998
4999 if (!ixgbe_check_for_bit_pf(hw, IXGBE_PFMBICR_VFACK_VF1 << vf_shift,
5000 index)) {
5001 ret_val = IXGBE_SUCCESS;
5002 /* TODO: should this be autocleared? */
5003 ixgbe_clear_ack_pf(hw, vf_id);
5004 }
5005
5006 return ret_val;
5007 }
5008
5009 /**
5010 * ixgbe_check_for_rst_pf - checks to see if the VF has reset
5011 * @hw: pointer to the HW structure
5012 * @vf_number: the VF index
5013 *
5014 * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
5015 **/
ixgbe_check_for_rst_pf(struct ixgbe_hw * hw,uint16_t vf_id)5016 int32_t ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, uint16_t vf_id)
5017 {
5018 uint32_t vf_shift = IXGBE_PFVFLRE_SHIFT(vf_id);
5019 uint32_t index = IXGBE_PFVFLRE_INDEX(vf_id);
5020 int32_t ret_val = IXGBE_ERR_MBX;
5021 uint32_t vflre = 0;
5022
5023 DEBUGFUNC("ixgbe_check_for_rst_pf");
5024
5025 switch (hw->mac.type) {
5026 case ixgbe_mac_82599EB:
5027 vflre = IXGBE_READ_REG(hw, IXGBE_PFVFLRE(index));
5028 break;
5029 case ixgbe_mac_X550:
5030 case ixgbe_mac_X550EM_x:
5031 case ixgbe_mac_X550EM_a:
5032 case ixgbe_mac_X540:
5033 vflre = IXGBE_READ_REG(hw, IXGBE_PFVFLREC(index));
5034 break;
5035 default:
5036 break;
5037 }
5038
5039 if (vflre & (1 << vf_shift)) {
5040 ret_val = IXGBE_SUCCESS;
5041 IXGBE_WRITE_REG(hw, IXGBE_PFVFLREC(index), (1 << vf_shift));
5042 hw->mbx.stats.rsts++;
5043 }
5044
5045 return ret_val;
5046 }
5047
5048 /**
5049 * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
5050 * @hw: pointer to the HW structure
5051 * @vf_number: the VF index
5052 *
5053 * return SUCCESS if we obtained the mailbox lock
5054 **/
ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw * hw,uint16_t vf_id)5055 int32_t ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, uint16_t vf_id)
5056 {
5057 struct ixgbe_mbx_info *mbx = &hw->mbx;
5058 int countdown = mbx->timeout;
5059 int32_t ret_val = IXGBE_ERR_MBX;
5060 uint32_t pf_mailbox;
5061
5062 DEBUGFUNC("ixgbe_obtain_mbx_lock_pf");
5063
5064 if (!mbx->timeout)
5065 return IXGBE_ERR_CONFIG;
5066
5067 while (countdown--) {
5068 /* Reserve mailbox for PF use */
5069 pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
5070 pf_mailbox |= IXGBE_PFMAILBOX_PFU;
5071 IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox);
5072
5073 /* Verify that PF is the owner of the lock */
5074 pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
5075 if (pf_mailbox & IXGBE_PFMAILBOX_PFU) {
5076 ret_val = IXGBE_SUCCESS;
5077 break;
5078 }
5079
5080 /* Wait a bit before trying again */
5081 usec_delay(mbx->usec_delay);
5082 }
5083
5084 if (ret_val != IXGBE_SUCCESS) {
5085 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
5086 "Failed to obtain mailbox lock");
5087 ret_val = IXGBE_ERR_TIMEOUT;
5088 }
5089
5090 return ret_val;
5091 }
5092
/**
 * ixgbe_write_mbx_pf - Places a message in the mailbox
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer (in 32-bit words)
 * @vf_id: the VF index
 *
 * returns SUCCESS if it successfully copied message into the buffer
 **/
int32_t ixgbe_write_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
	uint16_t vf_id)
{
	uint32_t pf_mailbox;
	int32_t ret_val;
	uint16_t i;

	DEBUGFUNC("ixgbe_write_mbx_pf");

	/* lock the mailbox to prevent pf/vf race condition */
	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_id);
	if (ret_val)
		goto out;

	/* flush msg and acks as we are overwriting the message buffer */
	ixgbe_clear_msg_pf(hw, vf_id);
	ixgbe_clear_ack_pf(hw, vf_id);

	/* copy the caller specified message to the mailbox memory buffer */
	for (i = 0; i < size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i, msg[i]);

	/* Interrupt VF to tell it a message has been sent */
	pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
	pf_mailbox |= IXGBE_PFMAILBOX_STS;
	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox);

	/* if msg sent wait until we receive an ack */
	ixgbe_poll_for_ack(hw, vf_id);

	/* update stats */
	hw->mbx.stats.msgs_tx++;

out:
	/* NOTE(review): release is invoked even when the lock was never
	 * obtained (lock failure jumps straight here).  Harmless while
	 * ops.release is the dummy handler installed by
	 * ixgbe_init_mbx_params_pf, but verify before wiring in a real
	 * release implementation. */
	hw->mbx.ops.release(hw, vf_id);

	return ret_val;
}
5140
5141 /**
5142 * ixgbe_read_mbx_pf_legacy - Read a message from the mailbox
5143 * @hw: pointer to the HW structure
5144 * @msg: The message buffer
5145 * @size: Length of buffer
5146 * @vf_id: the VF index
5147 *
5148 * This function copies a message from the mailbox buffer to the caller's
5149 * memory buffer. The presumption is that the caller knows that there was
5150 * a message due to a VF request so no polling for message is needed.
5151 **/
ixgbe_read_mbx_pf_legacy(struct ixgbe_hw * hw,uint32_t * msg,uint16_t size,uint16_t vf_id)5152 static int32_t ixgbe_read_mbx_pf_legacy(struct ixgbe_hw *hw, uint32_t *msg,
5153 uint16_t size, uint16_t vf_id)
5154 {
5155 int32_t ret_val;
5156 uint16_t i;
5157
5158 DEBUGFUNC("ixgbe_read_mbx_pf_legacy");
5159
5160 /* lock the mailbox to prevent pf/vf race condition */
5161 ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_id);
5162 if (ret_val != IXGBE_SUCCESS)
5163 return ret_val;
5164
5165 /* copy the message to the mailbox memory buffer */
5166 for (i = 0; i < size; i++)
5167 msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i);
5168
5169 /* Acknowledge the message and release buffer */
5170 IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), IXGBE_PFMAILBOX_ACK);
5171
5172 /* update stats */
5173 hw->mbx.stats.msgs_rx++;
5174
5175 return IXGBE_SUCCESS;
5176 }
5177
5178 /**
5179 * ixgbe_read_mbx_pf - Read a message from the mailbox
5180 * @hw: pointer to the HW structure
5181 * @msg: The message buffer
5182 * @size: Length of buffer
5183 * @vf_number: the VF index
5184 *
5185 * This function copies a message from the mailbox buffer to the caller's
5186 * memory buffer. The presumption is that the caller knows that there was
5187 * a message due to a VF request so no polling for message is needed.
5188 **/
ixgbe_read_mbx_pf(struct ixgbe_hw * hw,uint32_t * msg,uint16_t size,uint16_t vf_id)5189 int32_t ixgbe_read_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
5190 uint16_t vf_id)
5191 {
5192 uint32_t pf_mailbox;
5193 int32_t ret_val;
5194 uint16_t i;
5195
5196 DEBUGFUNC("ixgbe_read_mbx_pf");
5197
5198 /* check if there is a message from VF */
5199 ret_val = ixgbe_check_for_msg_pf(hw, vf_id);
5200 if (ret_val != IXGBE_SUCCESS)
5201 return IXGBE_ERR_MBX_NOMSG;
5202
5203 ixgbe_clear_msg_pf(hw, vf_id);
5204
5205 /* copy the message to the mailbox memory buffer */
5206 for (i = 0; i < size; i++)
5207 msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i);
5208
5209 /* Acknowledge the message and release buffer */
5210 pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
5211 pf_mailbox |= IXGBE_PFMAILBOX_ACK;
5212 IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox);
5213
5214 /* update stats */
5215 hw->mbx.stats.msgs_rx++;
5216
5217 return IXGBE_SUCCESS;
5218 }
5219
/**
 * ixgbe_release_mbx_lock_dummy - release mailbox lock
 * @hw: pointer to the HW structure
 * @mbx_id: id of mailbox to read
 *
 * No-op stand-in for mailbox flavors that have no explicit lock-release
 * step; installed as ops.release by ixgbe_init_mbx_params_vf/pf.
 **/
static void ixgbe_release_mbx_lock_dummy(struct ixgbe_hw *hw, uint16_t mbx_id)
{
	DEBUGFUNC("ixgbe_release_mbx_lock_dummy");
}
5229
5230 /**
5231 * ixgbe_write_mbx_vf_legacy - Write a message to the mailbox
5232 * @hw: pointer to the HW structure
5233 * @msg: The message buffer
5234 * @size: Length of buffer
5235 * @mbx_id: id of mailbox to write
5236 *
5237 * returns SUCCESS if it successfully copied message into the buffer
5238 **/
ixgbe_write_mbx_vf_legacy(struct ixgbe_hw * hw,uint32_t * msg,uint16_t size,uint16_t mbx_id)5239 static int32_t ixgbe_write_mbx_vf_legacy(struct ixgbe_hw *hw, uint32_t *msg,
5240 uint16_t size, uint16_t mbx_id)
5241 {
5242 int32_t ret_val;
5243 uint16_t i;
5244
5245 DEBUGFUNC("ixgbe_write_mbx_vf_legacy");
5246
5247 /* lock the mailbox to prevent pf/vf race condition */
5248 ret_val = ixgbe_obtain_mbx_lock_vf(hw);
5249 if (ret_val)
5250 return ret_val;
5251
5252 /* flush msg and acks as we are overwriting the message buffer */
5253 ixgbe_check_for_msg_vf(hw, 0);
5254 ixgbe_clear_msg_vf(hw);
5255 ixgbe_check_for_ack_vf(hw, 0);
5256 ixgbe_clear_ack_vf(hw);
5257
5258 /* copy the caller specified message to the mailbox memory buffer */
5259 for (i = 0; i < size; i++)
5260 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
5261
5262 /* update stats */
5263 hw->mbx.stats.msgs_tx++;
5264
5265 /* interrupt the PF to tell it a message has been sent */
5266 IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
5267
5268 return IXGBE_SUCCESS;
5269 }
5270
5271 /**
5272 * ixgbe_read_mbx_vf_legacy - Reads a message from the inbox intended for vf
5273 * @hw: pointer to the HW structure
5274 * @msg: The message buffer
5275 * @size: Length of buffer
5276 * @mbx_id: id of mailbox to read
5277 *
5278 * returns SUCCESS if it successfully read message from buffer
5279 **/
ixgbe_read_mbx_vf_legacy(struct ixgbe_hw * hw,uint32_t * msg,uint16_t size,uint16_t mbx_id)5280 static int32_t ixgbe_read_mbx_vf_legacy(struct ixgbe_hw *hw, uint32_t *msg,
5281 uint16_t size, uint16_t mbx_id)
5282 {
5283 int32_t ret_val;
5284 uint16_t i;
5285
5286 DEBUGFUNC("ixgbe_read_mbx_vf_legacy");
5287
5288 /* lock the mailbox to prevent pf/vf race condition */
5289 ret_val = ixgbe_obtain_mbx_lock_vf(hw);
5290 if (ret_val)
5291 return ret_val;
5292
5293 /* copy the message from the mailbox memory buffer */
5294 for (i = 0; i < size; i++)
5295 msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
5296
5297 /* Acknowledge receipt and release mailbox, then we're done */
5298 IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);
5299
5300 /* update stats */
5301 hw->mbx.stats.msgs_rx++;
5302
5303 return IXGBE_SUCCESS;
5304 }
5305
5306 /**
5307 * ixgbe_init_mbx_params_vf - set initial values for vf mailbox
5308 * @hw: pointer to the HW structure
5309 *
5310 * Initializes single set the hw->mbx struct to correct values for vf mailbox
5311 * Set of legacy functions is being used here
5312 */
ixgbe_init_mbx_params_vf(struct ixgbe_hw * hw)5313 void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw)
5314 {
5315 struct ixgbe_mbx_info *mbx = &hw->mbx;
5316
5317 mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
5318 mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
5319
5320 mbx->size = IXGBE_VFMAILBOX_SIZE;
5321
5322 mbx->ops.release = ixgbe_release_mbx_lock_dummy;
5323 mbx->ops.read = ixgbe_read_mbx_vf_legacy;
5324 mbx->ops.write = ixgbe_write_mbx_vf_legacy;
5325 mbx->ops.check_for_msg = ixgbe_check_for_msg_vf;
5326 mbx->ops.check_for_ack = ixgbe_check_for_ack_vf;
5327 mbx->ops.check_for_rst = ixgbe_check_for_rst_vf;
5328 mbx->ops.clear = NULL;
5329
5330 mbx->stats.msgs_tx = 0;
5331 mbx->stats.msgs_rx = 0;
5332 mbx->stats.reqs = 0;
5333 mbx->stats.acks = 0;
5334 mbx->stats.rsts = 0;
5335 }
5336
5337 /**
5338 * ixgbe_write_mbx_pf_legacy - Places a message in the mailbox
5339 * @hw: pointer to the HW structure
5340 * @msg: The message buffer
5341 * @size: Length of buffer
5342 * @vf_id: the VF index
5343 *
5344 * returns SUCCESS if it successfully copied message into the buffer
5345 **/
ixgbe_write_mbx_pf_legacy(struct ixgbe_hw * hw,uint32_t * msg,uint16_t size,uint16_t vf_id)5346 static int32_t ixgbe_write_mbx_pf_legacy(struct ixgbe_hw *hw, uint32_t *msg,
5347 uint16_t size, uint16_t vf_id)
5348 {
5349 int32_t ret_val;
5350 uint16_t i;
5351
5352 DEBUGFUNC("ixgbe_write_mbx_pf_legacy");
5353
5354 /* lock the mailbox to prevent pf/vf race condition */
5355 ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_id);
5356 if (ret_val)
5357 return ret_val;
5358
5359 /* flush msg and acks as we are overwriting the message buffer */
5360 ixgbe_check_for_msg_pf(hw, vf_id);
5361 ixgbe_clear_msg_pf(hw, vf_id);
5362 ixgbe_check_for_ack_pf(hw, vf_id);
5363 ixgbe_clear_ack_pf(hw, vf_id);
5364
5365 /* copy the caller specified message to the mailbox memory buffer */
5366 for (i = 0; i < size; i++)
5367 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i, msg[i]);
5368
5369 /* Interrupt VF to tell it a message has been sent and release buffer*/
5370 IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), IXGBE_PFMAILBOX_STS);
5371
5372 /* update stats */
5373 hw->mbx.stats.msgs_tx++;
5374
5375 return IXGBE_SUCCESS;
5376 }
5377
5378 /**
5379 * ixgbe_clear_mbx_pf - Clear Mailbox Memory
5380 * @hw: pointer to the HW structure
5381 * @vf_id: the VF index
5382 *
5383 * Set VFMBMEM of given VF to 0x0.
5384 **/
ixgbe_clear_mbx_pf(struct ixgbe_hw * hw,uint16_t vf_id)5385 static int32_t ixgbe_clear_mbx_pf(struct ixgbe_hw *hw, uint16_t vf_id)
5386 {
5387 uint16_t mbx_size = hw->mbx.size;
5388 uint16_t i;
5389
5390 if (vf_id > 63)
5391 return IXGBE_ERR_PARAM;
5392
5393 for (i = 0; i < mbx_size; ++i)
5394 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i, 0x0);
5395
5396 return IXGBE_SUCCESS;
5397 }
5398
5399 /**
5400 * ixgbe_init_mbx_params_pf - set initial values for pf mailbox
5401 * @hw: pointer to the HW structure
5402 *
5403 * Initializes the hw->mbx struct to correct values for pf mailbox
5404 */
ixgbe_init_mbx_params_pf(struct ixgbe_hw * hw)5405 void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
5406 {
5407 struct ixgbe_mbx_info *mbx = &hw->mbx;
5408
5409 if (hw->mac.type != ixgbe_mac_82599EB &&
5410 hw->mac.type != ixgbe_mac_X550 &&
5411 hw->mac.type != ixgbe_mac_X550EM_x &&
5412 hw->mac.type != ixgbe_mac_X550EM_a &&
5413 hw->mac.type != ixgbe_mac_X540)
5414 return;
5415
5416 /* Initialize common mailbox settings */
5417 mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
5418 mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
5419 mbx->size = IXGBE_VFMAILBOX_SIZE;
5420
5421 /* Initialize counters with zeroes */
5422 mbx->stats.msgs_tx = 0;
5423 mbx->stats.msgs_rx = 0;
5424 mbx->stats.reqs = 0;
5425 mbx->stats.acks = 0;
5426 mbx->stats.rsts = 0;
5427
5428 /* Initialize mailbox operations */
5429 mbx->ops.release = ixgbe_release_mbx_lock_dummy;
5430 mbx->ops.read = ixgbe_read_mbx_pf_legacy;
5431 mbx->ops.write = ixgbe_write_mbx_pf_legacy;
5432 mbx->ops.check_for_msg = ixgbe_check_for_msg_pf;
5433 mbx->ops.check_for_ack = ixgbe_check_for_ack_pf;
5434 mbx->ops.check_for_rst = ixgbe_check_for_rst_pf;
5435 mbx->ops.clear = ixgbe_clear_mbx_pf;
5436 }
5437