/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "ixgbe_type.h"
#include "ixgbe_82599.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"

#define IXGBE_82599_MAX_TX_QUEUES 128
#define IXGBE_82599_MAX_RX_QUEUES 128
#define IXGBE_82599_RAR_ENTRIES   128
#define IXGBE_82599_MC_TBL_SIZE   128
#define IXGBE_82599_VFT_TBL_SIZE  128
#define IXGBE_82599_RX_PB_SIZE    512

static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
				   u16 offset, u16 *data);
static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
					  u16 words, u16 *data);
static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
				     u8 dev_addr, u8 *data);
static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
				      u8 dev_addr, u8 data);

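/**
 * ixgbe_init_mac_link_ops_82599 - Initialize MAC link function pointers
 * @hw: pointer to hardware structure
 **/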
void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");

	/*
	 * Enable the laser control functions for SFP+ fiber
	 * when MNG is not enabled.
	 */
	if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
	    !ixgbe_mng_enabled(hw)) {
		mac->ops.disable_tx_laser =
				ixgbe_disable_tx_laser_multispeed_fiber;
		mac->ops.enable_tx_laser =
				ixgbe_enable_tx_laser_multispeed_fiber;
		mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber;

	} else {
		mac->ops.disable_tx_laser = NULL;
		mac->ops.enable_tx_laser = NULL;
		mac->ops.flap_tx_laser = NULL;
	}

	if (hw->phy.multispeed_fiber) {
		/* Set up dual speed SFP+ support */
		mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
		mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
		mac->ops.set_rate_select_speed =
					ixgbe_set_hard_rate_select_speed;
		if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed)
			mac->ops.set_rate_select_speed =
					ixgbe_set_soft_rate_select_speed;
	} else {
		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
		    (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
		     hw->phy.smart_speed == ixgbe_smart_speed_on) &&
		     !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
			mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed;
		} else {
			mac->ops.setup_link = ixgbe_setup_mac_link_82599;
		}
	}
}

/**
 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
 * @hw: pointer to hardware structure
 *
 * Initialize any function pointers that were not able to be
 * set during init_shared_code because the PHY/SFP type was
 * not known. Perform the SFP init if necessary.
 *
 **/
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = IXGBE_SUCCESS;
	u32 esdp;

	DEBUGFUNC("ixgbe_init_phy_ops_82599");

	if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
		/* Store flag indicating I2C bus access control unit. */
		hw->phy.qsfp_shared_i2c_bus = TRUE;

		/* Initialize access to QSFP+ I2C bus */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp |= IXGBE_ESDP_SDP0_DIR;
		esdp &= ~IXGBE_ESDP_SDP1_DIR;
		esdp &= ~IXGBE_ESDP_SDP0;
		esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
		esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);

		phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599;
		phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599;
	}
	/* Identify the PHY or SFP module */
	ret_val = phy->ops.identify(hw);
	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto init_phy_ops_out;

	/* Setup function pointers based on detected SFP module and speeds */
	ixgbe_init_mac_link_ops_82599(hw);
	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
		hw->phy.ops.reset = NULL;

	/* If copper media, overwrite with copper function pointers */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = ixgbe_setup_copper_link_82599;
		mac->ops.get_link_capabilities =
				ixgbe_get_copper_link_capabilities_generic;
	}

	/* Set necessary function pointers based on PHY type */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
				ixgbe_get_phy_firmware_version_tnx;
		break;
	default:
		break;
	}
init_phy_ops_out:
	return ret_val;
}

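/**
 * ixgbe_setup_sfp_modules_82599 - Setup SFP module
 * @hw: pointer to hardware structure
 *
 * Reads the SFP init sequence from the EEPROM, programs it to the
 * hardware, and restarts the DSP in SFI mode.
 **/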
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u16 list_offset, data_offset, data_value;

	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");

	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
		ixgbe_init_mac_link_ops_82599(hw);

		hw->phy.ops.reset = NULL;

		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
							      &data_offset);
		if (ret_val != IXGBE_SUCCESS)
			goto setup_sfp_out;

		/* PHY config will finish before releasing the semaphore */
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto setup_sfp_out;
		}

		if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
			goto setup_sfp_err;
		while (data_value != 0xffff) {
			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
			IXGBE_WRITE_FLUSH(hw);
			if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
				goto setup_sfp_err;
		}

		/* Release the semaphore */
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		/* Delay before obtaining the semaphore again to allow FW
		 * access; prot_autoc_write uses the semaphore too.
		 */
		msec_delay(hw->eeprom.semaphore_delay);

		/* Restart DSP and set SFI mode */
		ret_val = hw->mac.ops.prot_autoc_write(hw,
			hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
			FALSE);

		if (ret_val) {
			DEBUGOUT("sfp module setup not complete\n");
			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
			goto setup_sfp_out;
		}

	}

setup_sfp_out:
	return ret_val;

setup_sfp_err:
	/* Release the semaphore */
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
	/* Delay obtaining semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", data_offset);
	return IXGBE_ERR_PHY;
}

/**
 * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
 * @hw: pointer to hardware structure
 * @locked: Return whether we locked for this read.
 * @reg_val: Value we read from AUTOC
 *
 * For this part (82599) we need to wrap read-modify-writes with a possible
 * FW/SW lock. It is assumed this lock will be freed with the next
 * prot_autoc_write_82599().
 */
s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
{
	s32 ret_val;

	*locked = FALSE;
	/* If LESM is on then we need to hold the SW/FW semaphore. */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS)
			return IXGBE_ERR_SWFW_SYNC;

		*locked = TRUE;
	}

	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	return IXGBE_SUCCESS;
}

/**
 * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
 * @hw: pointer to hardware structure
 * @autoc: value to write to AUTOC
 * @locked: bool to indicate whether the SW/FW lock was already taken by
 *          a previous prot_autoc_read_82599.
 *
 * This part (82599) may need to hold the SW/FW lock around all writes to
 * AUTOC. Likewise after a write we need to do a pipeline reset.
 */
s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
{
	s32 ret_val = IXGBE_SUCCESS;

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		goto out;

	/* We only need to get the lock if:
	 * - We didn't do it already (in the read part of a read-modify-write)
	 * - LESM is enabled.
	 */
	if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS)
			return IXGBE_ERR_SWFW_SYNC;

		locked = TRUE;
	}

	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
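	/* Apply the new AUTOC value by resetting the link pipeline */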
	ret_val = ixgbe_reset_pipeline_82599(hw);

out:
	/* Free the SW/FW semaphore as we either grabbed it here or
	 * already had it when this function was called.
	 */
	if (locked)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	return ret_val;
}

/**
 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
 * @hw: pointer to hardware structure
 *
 * Initialize the function pointers and assign the MAC type for 82599.
 * Does not touch the hardware.
 **/

s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82599");

	ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.identify = ixgbe_identify_phy_82599;
	phy->ops.init = ixgbe_init_phy_ops_82599;

	/* MAC */
	mac->ops.reset_hw = ixgbe_reset_hw_82599;
	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
	mac->ops.get_media_type = ixgbe_get_media_type_82599;
	mac->ops.get_supported_physical_layer =
				ixgbe_get_supported_physical_layer_82599;
	mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
	mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599;
	mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599;
	mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599;
	mac->ops.start_hw = ixgbe_start_hw_82599;
	mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
	mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
	mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
	mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
	mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
	mac->ops.prot_autoc_read = prot_autoc_read_82599;
	mac->ops.prot_autoc_write = prot_autoc_write_82599;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
	mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
	mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
	mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
	mac->rar_highwater = 1;
	mac->ops.set_vfta = ixgbe_set_vfta_generic;
	mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
	mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
	mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
	mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599;
	mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
	mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;

	/* Link */
	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599;
	mac->ops.check_link = ixgbe_check_mac_link_generic;
	mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
	ixgbe_init_mac_link_ops_82599(hw);

	mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
	mac->vft_size = IXGBE_82599_VFT_TBL_SIZE;
	mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
	mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE;
	mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
	mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);

	mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
				      & IXGBE_FWSM_MODE_MASK);

	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;

	/* EEPROM */
	eeprom->ops.read = ixgbe_read_eeprom_82599;
	eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;

	mac->ops.bypass_rw = ixgbe_bypass_rw_generic;
	mac->ops.bypass_valid_rd = ixgbe_bypass_valid_rd_generic;
	mac->ops.bypass_set = ixgbe_bypass_set_generic;
	mac->ops.bypass_rd_eep = ixgbe_bypass_rd_eep_generic;

	mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;

	return ret_val;
}

/**
 * ixgbe_get_link_capabilities_82599 - Determines link capabilities
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @autoneg: TRUE when autoneg or autotry is enabled
 *
 * Determines the link capabilities by reading the AUTOC register.
 **/
s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed,
				      bool *autoneg)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = 0;

	DEBUGFUNC("ixgbe_get_link_capabilities_82599");

	/* Check if 1G SFP module. */
	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		goto out;
	}

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If AUTOC value has not
	 * been stored, use the current register values.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_10G_SERIAL:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR:
	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
		*autoneg = FALSE;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
		break;
	}

	if (hw->phy.multispeed_fiber) {
		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
			  IXGBE_LINK_SPEED_1GB_FULL;

		/* QSFP must not enable full auto-negotiation
		 * Limited autoneg is enabled at 1G
		 */
		if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
			*autoneg = FALSE;
		else
			*autoneg = TRUE;
	}

out:
	return status;
}

/**
 * ixgbe_get_media_type_82599 - Get media type
 * @hw: pointer to hardware structure
 *
 * Returns the media type (fiber, copper, backplane)
 **/
enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
{
	enum ixgbe_media_type media_type;

	DEBUGFUNC("ixgbe_get_media_type_82599");

	/* Detect if there is a copper PHY attached. */
	switch (hw->phy.type) {
	case ixgbe_phy_cu_unknown:
	case ixgbe_phy_tn:
		media_type = ixgbe_media_type_copper;
		goto out;
	default:
		break;
	}

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
	case IXGBE_DEV_ID_82599_KR:
	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
	case IXGBE_DEV_ID_82599_XAUI_LOM:
		/* Default device ID is mezzanine card KX/KX4 */
		media_type = ixgbe_media_type_backplane;
		break;
	case IXGBE_DEV_ID_82599_SFP:
	case IXGBE_DEV_ID_82599_SFP_FCOE:
	case IXGBE_DEV_ID_82599_SFP_EM:
	case IXGBE_DEV_ID_82599_SFP_SF2:
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599EN_SFP:
		media_type = ixgbe_media_type_fiber;
		break;
	case IXGBE_DEV_ID_82599_CX4:
		media_type = ixgbe_media_type_cx4;
		break;
	case IXGBE_DEV_ID_82599_T3_LOM:
		media_type = ixgbe_media_type_copper;
		break;
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		media_type = ixgbe_media_type_fiber_qsfp;
		break;
	case IXGBE_DEV_ID_82599_BYPASS:
		media_type = ixgbe_media_type_fiber_fixed;
		hw->phy.multispeed_fiber = TRUE;
		break;
	default:
		media_type = ixgbe_media_type_unknown;
		break;
	}
out:
	return media_type;
}

/**
 * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
 * @hw: pointer to hardware structure
 *
 * Disables link during D3 power down sequence.
 *
 **/
void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
{
	u32 autoc2_reg;
	u16 ee_ctrl_2 = 0;

	DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
	ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);

	if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
	    ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
		autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
		autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
	}
}

/**
 * ixgbe_start_mac_link_82599 - Setup MAC link settings
 * @hw: pointer to hardware structure
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Configures link settings based on values in the ixgbe_hw struct.
 * Restarts the link.  Performs autonegotiation if needed.
 **/
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
			       bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;
	bool got_lock = FALSE;

	DEBUGFUNC("ixgbe_start_mac_link_82599");

	/* reset_pipeline requires us to hold this lock as it writes to
	 * AUTOC.
	 */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		status = hw->mac.ops.acquire_swfw_sync(hw,
						       IXGBE_GSSR_MAC_CSR_SM);
		if (status != IXGBE_SUCCESS)
			goto out;

		got_lock = TRUE;
	}

	/* Restart link */
	ixgbe_reset_pipeline_82599(hw);

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autoneg did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noise during initial link setup */
	msec_delay(50);

out:
	return status;
}

/**
 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
 * @hw: pointer to hardware structure
 *
 * The base drivers may require better control over SFP+ module
 * PHY states.  This includes selectively shutting down the Tx
 * laser on the PHY, effectively halting physical link.
 **/
void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		return;

	/* Disable Tx laser; allow 100us to go dark per spec */
	esdp_reg |= IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(100);
}

/**
 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
 * @hw: pointer to hardware structure
 *
 * The base drivers may require better control over SFP+ module
 * PHY states.  This includes selectively turning on the Tx
 * laser on the PHY, effectively starting physical link.
 **/
void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* Enable Tx laser; allow 100ms to light up */
	esdp_reg &= ~IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(100);
}

/**
 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
 * @hw: pointer to hardware structure
 *
 * When the driver changes the link speeds that it can support,
 * it sets autotry_restart to TRUE to indicate that we need to
 * initiate a new autotry session with the link partner.  To do
 * so, we set the speed then disable and re-enable the Tx laser, to
 * alert the link partner that it also needs to restart autotry on its
 * end.  This is consistent with true clause 37 autoneg, which also
 * involves a loss of signal.
 **/
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		return;

	if (hw->mac.autotry_restart) {
		ixgbe_disable_tx_laser_multispeed_fiber(hw);
		ixgbe_enable_tx_laser_multispeed_fiber(hw);
		hw->mac.autotry_restart = FALSE;
	}
}

/**
 * ixgbe_set_hard_rate_select_speed - Set module link speed
 * @hw: pointer to hardware structure
 * @speed: link speed to set
 *
 * Set module link speed via RS0/RS1 rate select pins.
 */
void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	switch (speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		esdp_reg &= ~IXGBE_ESDP_SDP5;
		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
		break;
	default:
		DEBUGOUT("Invalid fixed module speed\n");
		return;
	}

	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Implements the Intel SmartSpeed algorithm.
 **/
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
				    ixgbe_link_speed speed,
				    bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 i, j;
	bool link_up = FALSE;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");

	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	if (speed & IXGBE_LINK_SPEED_100_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
	 * autoneg advertisement if link is unable to be established at the
	 * highest negotiated rate.  This can sometimes happen due to integrity
	 * issues with the physical media connection.
	 */

	/* First, try to get link with full advertisement */
	hw->phy.smart_speed_active = FALSE;
	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
		status = ixgbe_setup_mac_link_82599(hw, speed,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			goto out;

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
		 * Table 9 in the AN MAS.
		 */
		for (i = 0; i < 5; i++) {
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed, &link_up,
						  FALSE);
			if (status != IXGBE_SUCCESS)
				goto out;

			if (link_up)
				goto out;
		}
	}

	/*
	 * We didn't get link.  If we advertised KR plus one of KX4/KX
	 * (or BX4/BX), then disable KR and try again.
	 */
	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
		goto out;

	/* Turn SmartSpeed on to disable KR support */
	hw->phy.smart_speed_active = TRUE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * Wait for the controller to acquire link.  600ms will allow for
	 * the AN link_fail_inhibit_timer as well for multiple cycles of
	 * parallel detect, both 10g and 1g.  This allows for the maximum
	 * connect attempts as defined in the AN MAS table 73-7.
	 */
	for (i = 0; i < 6; i++) {
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Turn SmartSpeed back off. */
	hw->phy.smart_speed_active = FALSE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);

out:
	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
		DEBUGOUT("Smartspeed has downgraded the link speed "
			 "from the maximum advertised\n");
	return status;
}

/**
 * ixgbe_setup_mac_link_82599 - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 **/
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
			       ixgbe_link_speed speed,
			       bool autoneg_wait_to_complete)
{
	bool autoneg = FALSE;
	s32 status = IXGBE_SUCCESS;
	u32 pma_pmd_1g, link_mode;
	u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* holds the value of AUTOC register at this current point in time */
	u32 orig_autoc = 0; /* holds the cached value of AUTOC register */
	u32 autoc = current_autoc; /* Temporary variable used for comparison purposes */
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 links_reg;
	u32 i;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;

	DEBUGFUNC("ixgbe_setup_mac_link_82599");

	/* Check to see if speed passed in is supported. */
	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
	if (status)
		goto out;

	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
	}

	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
	if (hw->mac.orig_link_settings_stored)
		orig_autoc = hw->mac.orig_autoc;
	else
		orig_autoc = autoc;

	link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
	pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;

	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
		/* Set KX4/KX/KR support according to speed requested */
		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
				autoc |= IXGBE_AUTOC_KX4_SUPP;
			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
			    (hw->phy.smart_speed_active == FALSE))
				autoc |= IXGBE_AUTOC_KR_SUPP;
		}
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
		/* Switch from 1G SFI to 10G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
		}
	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
		/* Switch from 10G SFI to 1G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel)
				autoc |= IXGBE_AUTOC_LMS_1G_AN;
			else
				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
		}
	}

	if (autoc != current_autoc) {
		/* Restart link */
		status = hw->mac.ops.prot_autoc_write(hw, autoc, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		/* Only poll for autoneg to complete if specified to do so */
		if (autoneg_wait_to_complete) {
			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
				links_reg = 0; /* Just in case Autoneg time = 0 */
				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
					links_reg =
					       IXGBE_READ_REG(hw, IXGBE_LINKS);
					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
						break;
					msec_delay(100);
				}
				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
					status =
						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
					DEBUGOUT("Autoneg did not complete.\n");
				}
			}
		}

		/* Add delay to filter out noise during initial link setup */
		msec_delay(50);
	}

out:
	return status;
}

/**
 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
 *
 * Restarts link on PHY and MAC based on settings passed in.
 **/
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete)
{
	s32 status;

	DEBUGFUNC("ixgbe_setup_copper_link_82599");

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed,
					      autoneg_wait_to_complete);
	/* Set up MAC */
	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);

	return status;
}

/**
 * ixgbe_reset_hw_82599 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
 * reset.
 **/
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
	ixgbe_link_speed link_speed;
	s32 status;
	u32 ctrl = 0;
	u32 i, autoc, autoc2;
	u32 curr_lms;
	bool link_up = FALSE;

	DEBUGFUNC("ixgbe_reset_hw_82599");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

	/* PHY ops must be identified and initialized prior to reset */

	/* Identify PHY and related function pointers */
	status = hw->phy.ops.init(hw);

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Setup SFP module if there is one present. */
	if (hw->phy.sfp_setup_needed) {
		status = hw->mac.ops.setup_sfp(hw);
		hw->phy.sfp_setup_needed = FALSE;
	}

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
		hw->phy.ops.reset(hw);

	/* remember AUTOC from before we reset */
	curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
	 * If link reset is used when link is up, it might reset the PHY when
	 * mng is using it.  If link is down or the flag to force full link
	 * reset is set, then perform link reset.
	 */
	ctrl = IXGBE_CTRL_LNK_RST;
	if (!hw->force_full_reset) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
		if (link_up)
			ctrl = IXGBE_CTRL_RST;
	}

	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear meaning reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}

	if (ctrl & IXGBE_CTRL_RST_MASK) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to
	 * allow time for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/*
	 * Store the original AUTOC/AUTOC2 values if they have not been
	 * stored off yet.  Otherwise restore the stored original
	 * values since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);

	/* Enable link if disabled in NVM */
	if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
		autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		IXGBE_WRITE_FLUSH(hw);
	}

	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_autoc2 = autoc2;
		hw->mac.orig_link_settings_stored = TRUE;
	} else {

		/* If MNG FW is running on a multi-speed device that
		 * doesn't autoneg without driver support, we need to
		 * leave LMS in the state it was in before the MAC reset.
		 * Likewise if we support WoL we don't want to change the
		 * LMS state.
		 */
		if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
		    hw->wol_enabled)
			hw->mac.orig_autoc =
				(hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
				curr_lms;

		if (autoc != hw->mac.orig_autoc) {
			status = hw->mac.ops.prot_autoc_write(hw,
							hw->mac.orig_autoc,
							FALSE);
			if (status != IXGBE_SUCCESS)
				goto reset_hw_out;
		}

		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
			autoc2 |= (hw->mac.orig_autoc2 &
				   IXGBE_AUTOC2_UPPER_MASK);
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		}
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		/* Save the SAN MAC RAR index */
		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;

		hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for this RAR */
		hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
				       IXGBE_CLEAR_VMDQ_ALL);

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

reset_hw_out:
	return status;
}

/**
 * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
 * @hw: pointer to hardware structure
 * @fdircmd: current value of FDIRCMD register
 */
static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
{
	int i;

	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
		*fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
		if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
			return IXGBE_SUCCESS;
		usec_delay(10);
	}

	return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
}

/**
 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
 * @hw: pointer to hardware structure
 **/
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
	s32 err;
	int i;
	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	u32 fdircmd;
	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;

	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");

	/*
	 * Before starting reinitialization process,
	 * FDIRCMD.CMD must be zero.
	 */
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
		return err;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * The 82599 adapter's flow director init flow cannot be restarted.
	 * Work around the 82599 silicon errata by performing the following
	 * steps before re-writing the FDIRCTRL control register with the
	 * same value:
	 * - write 1 to bit 8 of the FDIRCMD register, then
	 * - write 0 to bit 8 of the FDIRCMD register
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * Clear FDIR Hash register to clear any leftover hashes
	 * waiting to be programmed.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll init-done after we write FDIRCTRL register */
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
		    IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}
	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	}

	/* Clear FDIR statistics registers (read to clear) */
	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register
 **/
static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	int i;

	DEBUGFUNC("ixgbe_fdir_enable_82599");

	/* Prime the keys for hashing */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);

	/*
	 * Poll init-done after we write the register.  Estimated times:
	 *      10G: PBALLOC = 11b, timing is 60us
	 *       1G: PBALLOC = 11b, timing is 600us
	 *     100M: PBALLOC = 11b, timing is 6ms
	 *
	 * Multiply these timings by 4 if under full Rx load
	 *
	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
	 * this might not finish in our poll time, but we can live with that
	 * for now.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
		    IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}

	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
		DEBUGOUT("Flow Director poll time exceeded!\n");
}

/**
 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register, initially
 *	      contains just the value of the Rx packet buffer allocation
 **/
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	DEBUGFUNC("ixgbe_init_fdir_signature_82599");

	/*
	 * Continue setup of fdirctrl register bits:
	 *   Move the flexible bytes to use the ethertype - shift 6 words
	 *   Set the maximum length per hash bucket to 0xA filters
	 *   Send interrupt when 64 (0x4 * 16) filters are left
	 */
	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register, initially
 *	      contains just the value of the Rx packet buffer allocation
 * @cloud_mode: TRUE - cloud mode, FALSE - other mode
 **/
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
				  bool cloud_mode)
{
	UNREFERENCED_1PARAMETER(cloud_mode);
	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");

	/*
	 * Continue setup of fdirctrl register bits:
	 *   Turn perfect match filtering on
	 *   Report hash in RSS field of Rx wb descriptor
	 *   Initialize the drop queue to queue 127
	 *   Move the flexible bytes to use the ethertype - shift 6 words
	 *   Set the maximum length per hash bucket to 0xA filters
	 *   Send interrupt when 64 (0x4 * 16) filters are left
	 */
	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
		    IXGBE_FDIRCTRL_REPORT_STATUS |
		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	if (cloud_mode)
		fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD <<
			     IXGBE_FDIRCTRL_FILTERMODE_SHIFT);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_set_fdir_drop_queue_82599 - Set Flow Director drop queue
 * @hw: pointer to hardware structure
 * @dropqueue: Rx queue index used for the dropped packets
 **/
void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue)
{
	u32 fdirctrl;

	DEBUGFUNC("ixgbe_set_fdir_drop_queue_82599");
	/* Clear init done bit and drop queue field */
	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	fdirctrl &= ~(IXGBE_FDIRCTRL_DROP_Q_MASK | IXGBE_FDIRCTRL_INIT_DONE);

	/* Set drop queue */
	fdirctrl |= (dropqueue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
	if ((hw->mac.type == ixgbe_mac_X550) ||
	    (hw->mac.type == ixgbe_mac_X550EM_x) ||
	    (hw->mac.type == ixgbe_mac_X550EM_a))
		fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);
}

/*
 * These defines allow us to quickly generate all of the necessary instructions
 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
 * for values 0 through 15
 */
#define IXGBE_ATR_COMMON_HASH_KEY \
		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
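/* Key bits set in both hash keys feed common_hash, which is later folded
 * into both the bucket and signature hashes.
 */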
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
		common_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
		sig_hash ^= lo_hash_dword << (16 - n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
		common_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
		sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)

/**
 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
 * @input: input bitstream to compute the hash on
 * @common: compressed common input dword
 *
 * This function is almost identical to the function above but contains
 * several optimizations such as unwinding all of the loops, letting the
 * compiler work out all of the conditional ifs since the keys are static
 * defines, and computing two keys at once since the hashed dword stream
 * will be the same for both keys.
 **/
u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
				     union ixgbe_atr_hash_dword common)
{
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input.dword);

	/* generate common hash dword */
	hi_hash_dword = IXGBE_NTOHL(common.dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the VLAN until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process the remaining 30 bits of the key */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);

	/* combine common_hash result with signature and bucket hashes */
	bucket_hash ^= common_hash;
	bucket_hash &= IXGBE_ATR_HASH_MASK;

	sig_hash ^= common_hash << 16;
	sig_hash &= IXGBE_ATR_HASH_MASK << 16;

	/* return completed signature hash */
	return sig_hash ^ bucket_hash;
}

/**
 * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
 * @hw: pointer to hardware structure
 * @input: unique input dword
 * @common: compressed common input dword
 * @queue: queue index to direct traffic to
 *
 * Note that the tunnel bit in input must not be set when the hardware
 * tunneling support does not exist.
 **/
void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
					   union ixgbe_atr_hash_dword input,
					   union ixgbe_atr_hash_dword common,
					   u8 queue)
{
	u64 fdirhashcmd;
	u8 flow_type;
	bool tunnel;
	u32 fdircmd;

	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");

	/*
	 * Get the flow_type in order to program FDIRCMD properly;
	 * the lowest 2 bits are FDIRCMD.L4TYPE, the third lowest bit is
	 * FDIRCMD.IPV6, and the fifth is FDIRCMD.TUNNEL_FILTER
	 */
	tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
	flow_type = input.formatted.flow_type &
		    (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
	switch (flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
	case IXGBE_ATR_FLOW_TYPE_TCPV6:
	case IXGBE_ATR_FLOW_TYPE_UDPV6:
	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
		break;
	default:
		DEBUGOUT(" Error on flow type input\n");
		return;
	}

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	if (tunnel)
		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;

	/*
	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
	 */
	fdirhashcmd = (u64)fdircmd << 32;
	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);

	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);

	return;
}
1592
1593 #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
1594 do { \
1595 u32 n = (_n); \
1596 if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1597 bucket_hash ^= lo_hash_dword >> n; \
1598 if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1599 bucket_hash ^= hi_hash_dword >> n; \
1600 } while (0)
1601
1602 /**
1603 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
1604 * @input: input bitstream to compute the hash on
1605 * @input_mask: mask for the input bitstream
1606 *
1607 * This function serves two main purposes. First it applies the input_mask
1608 * to the atr_input resulting in a cleaned up atr_input data stream.
1609 * Secondly it computes the hash and stores it in the bkt_hash field at
1610 * the end of the input byte stream. This way it will be available for
1611 * future use without needing to recompute the hash.
1612 **/
ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input * input,union ixgbe_atr_input * input_mask)1613 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1614 union ixgbe_atr_input *input_mask)
1615 {
1616
1617 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1618 u32 bucket_hash = 0;
1619 u32 hi_dword = 0;
1620 u32 i = 0;
1621
1622 /* Apply masks to input data */
1623 for (i = 0; i < 14; i++)
1624 input->dword_stream[i] &= input_mask->dword_stream[i];
1625
1626 /* record the flow_vm_vlan bits as they are a key part to the hash */
1627 flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
1628
1629 /* generate common hash dword */
1630 for (i = 1; i <= 13; i++)
1631 hi_dword ^= input->dword_stream[i];
1632 hi_hash_dword = IXGBE_NTOHL(hi_dword);
1633
1634 /* low dword is word swapped version of common */
1635 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1636
1637 /* apply flow ID/VM pool/VLAN ID bits to hash words */
1638 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1639
1640 /* Process bits 0 and 16 */
1641 IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
1642
1643 /*
1644 * apply flow ID/VM pool/VLAN ID bits to the lo hash dword; this had to
1645 * be delayed because those bits must not take part in the bit 0
1646 * iteration above, so the VLAN is only added after bit 0 was processed
1647 */
1648 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1649
1650 /* Process the remaining 30 bits of the key */
1651 for (i = 1; i <= 15; i++)
1652 IXGBE_COMPUTE_BKT_HASH_ITERATION(i);
1653
1654 /*
1655 * Limit hash to 13 bits since max bucket count is 8K.
1656 * Store result at the end of the input stream.
1657 */
1658 input->formatted.bkt_hash = bucket_hash & 0x1FFF;
1659 }
1660
1661 /**
1662 * ixgbe_get_fdirtcpm_82599 - generate a TCP/UDP port mask from atr_input_masks
1663 * @input_mask: mask to be bit swapped
1664 *
1665 * The source and destination port masks for flow director are bit swapped
1666 * in that bit 15 affects bit 0, bit 14 affects bit 1, bit 13 affects bit 2,
1667 * and so on. To generate a correctly swapped value we need to bit swap the
1668 * mask, and that is what this function accomplishes.
1669 **/
1670 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1671 {
1672 u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
1673 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1674 mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
1675 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1676 mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1677 mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1678 return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
1679 }
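/*
 * Note on the swap network above: the four masked shift/OR stages swap
 * adjacent bits, then 2-bit pairs, then nibbles, then bytes, which
 * reverses the bit order within each 16-bit half independently (the
 * halves themselves are never exchanged).  For example, a source port
 * mask of 0x00FF in the low half becomes 0xFF00, matching the
 * bit-15-affects-bit-0 layout described above.
 */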
1680
1681 /*
1682 * These two macros are meant to address the fact that we have registers
1683 * that are either all or in part big-endian. As a result on big-endian
1684 * systems we will end up byte swapping the value to little-endian before
1685 * it is byte swapped again and written to the hardware in the original
1686 * big-endian format.
1687 */
1688 #define IXGBE_STORE_AS_BE32(_value) \
1689 (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
1690 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
1691
1692 #define IXGBE_WRITE_REG_BE32(a, reg, value) \
1693 IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
1694
1695 #define IXGBE_STORE_AS_BE16(_value) \
1696 IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
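/*
 * Worked example for the macros above, assuming a little-endian host:
 * for a network-order value whose in-memory bytes are 12 34 56 78, the
 * host load yields 0x78563412, IXGBE_NTOHL() swaps it to 0x12345678,
 * and IXGBE_STORE_AS_BE32() swaps it back to 0x78563412, so the two
 * conversions cancel and the register receives the original big-endian
 * byte layout.  On a big-endian host IXGBE_NTOHL() is a no-op and only
 * the explicit swap in IXGBE_STORE_AS_BE32() applies.
 */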
1697
1698 s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
1699 union ixgbe_atr_input *input_mask, bool cloud_mode)
1700 {
1701 /* mask IPv6 since it is currently not supported */
1702 u32 fdirm = IXGBE_FDIRM_DIPv6;
1703 u32 fdirtcpm;
1704 u32 fdirip6m;
1705 UNREFERENCED_1PARAMETER(cloud_mode);
1706 DEBUGFUNC("ixgbe_fdir_set_input_mask_82599");
1707
1708 /*
1709 * Program the relevant mask registers. If src/dst_port or src/dst_addr
1710 * are zero, then assume a full mask for that field. Also assume that
1711 * a VLAN of 0 is unspecified, so mask that out as well. L4type
1712 * cannot be masked out in this implementation.
1713 *
1714 * This also assumes IPv4 only. IPv6 masking isn't supported at this
1715 * point in time.
1716 */
1717
1718 /* verify bucket hash is cleared on hash generation */
1719 if (input_mask->formatted.bkt_hash)
1720 DEBUGOUT(" bucket hash should always be 0 in mask\n");
1721
1722 /* Program FDIRM and verify partial masks */
1723 switch (input_mask->formatted.vm_pool & 0x7F) {
1724 case 0x0:
1725 fdirm |= IXGBE_FDIRM_POOL; /* fall through */
1726 case 0x7F:
1727 break;
1728 default:
1729 DEBUGOUT(" Error on vm pool mask\n");
1730 return IXGBE_ERR_CONFIG;
1731 }
1732
1733 switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
1734 case 0x0:
1735 fdirm |= IXGBE_FDIRM_L4P;
1736 if (input_mask->formatted.dst_port ||
1737 input_mask->formatted.src_port) {
1738 DEBUGOUT(" Error on src/dst port mask\n");
1739 return IXGBE_ERR_CONFIG;
1740 } /* fall through */
1741 case IXGBE_ATR_L4TYPE_MASK:
1742 break;
1743 default:
1744 DEBUGOUT(" Error on flow type mask\n");
1745 return IXGBE_ERR_CONFIG;
1746 }
1747
1748 switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
1749 case 0x0000:
1750 /* mask VLAN ID */
1751 fdirm |= IXGBE_FDIRM_VLANID;
1752 /* fall through */
1753 case 0x0FFF:
1754 /* mask VLAN priority */
1755 fdirm |= IXGBE_FDIRM_VLANP;
1756 break;
1757 case 0xE000:
1758 /* mask VLAN ID only */
1759 fdirm |= IXGBE_FDIRM_VLANID;
1760 /* fall through */
1761 case 0xEFFF:
1762 /* no VLAN fields masked */
1763 break;
1764 default:
1765 DEBUGOUT(" Error on VLAN mask\n");
1766 return IXGBE_ERR_CONFIG;
1767 }
1768
1769 switch (input_mask->formatted.flex_bytes & 0xFFFF) {
1770 case 0x0000:
1771 /* Mask Flex Bytes */
1772 fdirm |= IXGBE_FDIRM_FLEX;
1773 /* fall through */
1774 case 0xFFFF:
1775 break;
1776 default:
1777 DEBUGOUT(" Error on flexible byte mask\n");
1778 return IXGBE_ERR_CONFIG;
1779 }
1780
1781 if (cloud_mode) {
1782 fdirm |= IXGBE_FDIRM_L3P;
1783 fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
1784 fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
1785
1786 switch (input_mask->formatted.inner_mac[0] & 0xFF) {
1787 case 0x00:
1788 /* Mask inner MAC, fall through */
1789 fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC;
1790 case 0xFF:
1791 break;
1792 default:
1793 DEBUGOUT(" Error on inner_mac byte mask\n");
1794 return IXGBE_ERR_CONFIG;
1795 }
1796
1797 switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) {
1798 case 0x0:
1799 /* Mask vxlan id */
1800 fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI;
1801 break;
1802 case 0x00FFFFFF:
1803 fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
1804 break;
1805 case 0xFFFFFFFF:
1806 break;
1807 default:
1808 DEBUGOUT(" Error on TNI/VNI byte mask\n");
1809 return IXGBE_ERR_CONFIG;
1810 }
1811
1812 switch (input_mask->formatted.tunnel_type & 0xFFFF) {
1813 case 0x0:
1814 /* Mask tunnel type, fall through */
1815 fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
1816 case 0xFFFF:
1817 break;
1818 default:
1819 DEBUGOUT(" Error on tunnel type byte mask\n");
1820 return IXGBE_ERR_CONFIG;
1821 }
1822 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m);
1823
1824 /* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSCTPM,
1825 * FDIRSIP4M and FDIRDIP4M in cloud mode to allow
1826 * L3/L4 packets to tunnel.
1827 */
1828 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
1829 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
1830 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
1831 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
1832 switch (hw->mac.type) {
1833 case ixgbe_mac_X550:
1834 case ixgbe_mac_X550EM_x:
1835 case ixgbe_mac_X550EM_a:
1836 IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
1837 break;
1838 default:
1839 break;
1840 }
1841 }
1842
1843 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
1844 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
1845
1846 if (!cloud_mode) {
1847 /* store the TCP/UDP port masks, bit reversed from port
1848 * layout */
1849 fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
1850
1851 /* write both the same so that UDP and TCP use the same mask */
1852 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
1853 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
1854 /* also use it for SCTP */
1855 switch (hw->mac.type) {
1856 case ixgbe_mac_X550:
1857 case ixgbe_mac_X550EM_x:
1858 case ixgbe_mac_X550EM_a:
1859 IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
1860 break;
1861 default:
1862 break;
1863 }
1864
1865 /* store source and destination IP masks (big-endian) */
1866 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
1867 ~input_mask->formatted.src_ip[0]);
1868 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
1869 ~input_mask->formatted.dst_ip[0]);
1870 }
1871 return IXGBE_SUCCESS;
1872 }
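/*
 * A hedged example of the mask convention enforced above (hypothetical
 * field values): to match exactly on the L4 ports and the IPv4 source
 * address while wildcarding everything else, a caller could program the
 * mask roughly as follows.  Fields left at zero are treated as fully
 * masked by the switch statements above.
 *
 *	union ixgbe_atr_input mask;
 *
 *	memset(&mask, 0, sizeof(mask));
 *	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
 *				   IXGBE_ATR_L4TYPE_MASK;
 *	mask.formatted.src_port = 0xFFFF;
 *	mask.formatted.dst_port = 0xFFFF;
 *	mask.formatted.src_ip[0] = 0xFFFFFFFF;
 *	ixgbe_fdir_set_input_mask_82599(hw, &mask, FALSE);
 */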
1873
1874 s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
1875 union ixgbe_atr_input *input,
1876 u16 soft_id, u8 queue, bool cloud_mode)
1877 {
1878 u32 fdirport, fdirvlan, fdirhash, fdircmd;
1879 u32 addr_low, addr_high;
1880 u32 cloud_type = 0;
1881 s32 err;
1882 UNREFERENCED_1PARAMETER(cloud_mode);
1883
1884 DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
1885 if (!cloud_mode) {
1886 /* currently IPv6 is not supported, must be programmed with 0 */
1887 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
1888 input->formatted.src_ip[0]);
1889 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
1890 input->formatted.src_ip[1]);
1891 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
1892 input->formatted.src_ip[2]);
1893
1894 /* record the source address (big-endian) */
1895 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA,
1896 input->formatted.src_ip[0]);
1897
1898 /* record the first 32 bits of the destination address
1899 * (big-endian) */
1900 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA,
1901 input->formatted.dst_ip[0]);
1902
1903 /* record source and destination port (little-endian) */
1904 fdirport = IXGBE_NTOHS(input->formatted.dst_port);
1905 fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
1906 fdirport |= IXGBE_NTOHS(input->formatted.src_port);
1907 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
1908 }
1909
1910 /* record VLAN (little-endian) and flex_bytes(big-endian) */
1911 fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
1912 fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
1913 fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
1914 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
1915
1916 if (cloud_mode) {
1917 if (input->formatted.tunnel_type != 0)
1918 cloud_type = 0x80000000;
1919
1920 addr_low = ((u32)input->formatted.inner_mac[0] |
1921 ((u32)input->formatted.inner_mac[1] << 8) |
1922 ((u32)input->formatted.inner_mac[2] << 16) |
1923 ((u32)input->formatted.inner_mac[3] << 24));
1924 addr_high = ((u32)input->formatted.inner_mac[4] |
1925 ((u32)input->formatted.inner_mac[5] << 8));
1926 cloud_type |= addr_high;
1927 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low);
1928 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type);
1929 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni);
1930 }
1931
1932 /* configure FDIRHASH register */
1933 fdirhash = input->formatted.bkt_hash;
1934 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1935 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1936
1937 /*
1938 * flush all previous writes to make certain registers are
1939 * programmed prior to issuing the command
1940 */
1941 IXGBE_WRITE_FLUSH(hw);
1942
1943 /* configure FDIRCMD register */
1944 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1945 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1946 if (queue == IXGBE_FDIR_DROP_QUEUE)
1947 fdircmd |= IXGBE_FDIRCMD_DROP;
1948 if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK)
1949 fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
1950 fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1951 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1952 fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
1953
1954 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
1955 err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
1956 if (err) {
1957 DEBUGOUT("Flow Director command did not complete!\n");
1958 return err;
1959 }
1960
1961 return IXGBE_SUCCESS;
1962 }
1963
1964 s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
1965 union ixgbe_atr_input *input,
1966 u16 soft_id)
1967 {
1968 u32 fdirhash;
1969 u32 fdircmd;
1970 s32 err;
1971
1972 /* configure FDIRHASH register */
1973 fdirhash = input->formatted.bkt_hash;
1974 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1975 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1976
1977 /* flush hash to HW */
1978 IXGBE_WRITE_FLUSH(hw);
1979
1980 /* Query if filter is present */
1981 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
1982
1983 err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
1984 if (err) {
1985 DEBUGOUT("Flow Director command did not complete!\n");
1986 return err;
1987 }
1988
1989 /* if filter exists in hardware then remove it */
1990 if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
1991 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1992 IXGBE_WRITE_FLUSH(hw);
1993 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1994 IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
1995 }
1996
1997 return IXGBE_SUCCESS;
1998 }
1999
2000 /**
2001 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
2002 * @hw: pointer to hardware structure
2003 * @input: input bitstream
2004 * @input_mask: mask for the input bitstream
2005 * @soft_id: software index for the filters
2006 * @queue: queue index to direct traffic to
2007 * @cloud_mode: unused
2008 *
2009 * Note that the caller to this function must lock before calling, since the
2010 * hardware writes must be protected from one another.
2011 **/
2012 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
2013 union ixgbe_atr_input *input,
2014 union ixgbe_atr_input *input_mask,
2015 u16 soft_id, u8 queue, bool cloud_mode)
2016 {
2017 s32 err = IXGBE_ERR_CONFIG;
2018 UNREFERENCED_1PARAMETER(cloud_mode);
2019
2020 DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
2021
2022 /*
2023 * Check flow_type formatting, and bail out before we touch the hardware
2024 * if there's a configuration issue
2025 */
2026 switch (input->formatted.flow_type) {
2027 case IXGBE_ATR_FLOW_TYPE_IPV4:
2028 case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4:
2029 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
2030 if (input->formatted.dst_port || input->formatted.src_port) {
2031 DEBUGOUT(" Error on src/dst port\n");
2032 return IXGBE_ERR_CONFIG;
2033 }
2034 break;
2035 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
2036 case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4:
2037 if (input->formatted.dst_port || input->formatted.src_port) {
2038 DEBUGOUT(" Error on src/dst port\n");
2039 return IXGBE_ERR_CONFIG;
2040 }
2041 /* fall through */
2042 case IXGBE_ATR_FLOW_TYPE_TCPV4:
2043 case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
2044 case IXGBE_ATR_FLOW_TYPE_UDPV4:
2045 case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4:
2046 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
2047 IXGBE_ATR_L4TYPE_MASK;
2048 break;
2049 default:
2050 DEBUGOUT(" Error on flow type input\n");
2051 return err;
2052 }
2053
2054 /* program input mask into the HW */
2055 err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode);
2056 if (err)
2057 return err;
2058
2059 /* apply mask and compute/store hash */
2060 ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
2061
2062 /* program filters to filter memory */
2063 return ixgbe_fdir_write_perfect_filter_82599(hw, input,
2064 soft_id, queue, cloud_mode);
2065 }
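/*
 * Illustrative call sequence (hypothetical values): steering an exact
 * TCPv4 flow to Rx queue 3 using the mask sketched after
 * ixgbe_fdir_set_input_mask_82599() above.  The caller must hold the
 * lock noted in the function header; address and port fields are
 * expected in network order.
 *
 *	union ixgbe_atr_input input;
 *
 *	memset(&input, 0, sizeof(input));
 *	input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
 *	input.formatted.src_ip[0] = src_ip_be;
 *	input.formatted.src_port = src_port_be;
 *	input.formatted.dst_port = dst_port_be;
 *	err = ixgbe_fdir_add_perfect_filter_82599(hw, &input, &mask,
 *						  soft_id, 3, FALSE);
 */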
2066
2067 /**
2068 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
2069 * @hw: pointer to hardware structure
2070 * @reg: analog register to read
2071 * @val: read value
2072 *
2073 * Performs read operation to Omer analog register specified.
2074 **/
2075 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
2076 {
2077 u32 core_ctl;
2078
2079 DEBUGFUNC("ixgbe_read_analog_reg8_82599");
2080
2081 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
2082 (reg << 8));
2083 IXGBE_WRITE_FLUSH(hw);
2084 usec_delay(10);
2085 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
2086 *val = (u8)core_ctl;
2087
2088 return IXGBE_SUCCESS;
2089 }
2090
2091 /**
2092 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
2093 * @hw: pointer to hardware structure
2094 * @reg: analog register to write
2095 * @val: value to write
2096 *
2097 * Performs write operation to Omer analog register specified.
2098 **/
2099 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
2100 {
2101 u32 core_ctl;
2102
2103 DEBUGFUNC("ixgbe_write_analog_reg8_82599");
2104
2105 core_ctl = (reg << 8) | val;
2106 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
2107 IXGBE_WRITE_FLUSH(hw);
2108 usec_delay(10);
2109
2110 return IXGBE_SUCCESS;
2111 }
2112
2113 /**
2114 * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
2115 * @hw: pointer to hardware structure
2116 *
2117 * Starts the hardware using the generic start_hw function,
2118 * followed by the generation-2 start_hw function.
2119 * Then performs revision-specific operations, if any.
2120 **/
2121 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
2122 {
2123 s32 ret_val = IXGBE_SUCCESS;
2124
2125 DEBUGFUNC("ixgbe_start_hw_82599");
2126
2127 ret_val = ixgbe_start_hw_generic(hw);
2128 if (ret_val != IXGBE_SUCCESS)
2129 goto out;
2130
2131 ret_val = ixgbe_start_hw_gen2(hw);
2132 if (ret_val != IXGBE_SUCCESS)
2133 goto out;
2134
2135 /* We need to run link autotry after the driver loads */
2136 hw->mac.autotry_restart = TRUE;
2137
2138 if (ret_val == IXGBE_SUCCESS)
2139 ret_val = ixgbe_verify_fw_version_82599(hw);
2140 out:
2141 return ret_val;
2142 }
2143
2144 /**
2145 * ixgbe_identify_phy_82599 - Get physical layer module
2146 * @hw: pointer to hardware structure
2147 *
2148 * Determines the physical layer module found on the current adapter.
2149 * If PHY already detected, maintains current PHY type in hw struct,
2150 * otherwise executes the PHY detection routine.
2151 **/
2152 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
2153 {
2154 s32 status;
2155
2156 DEBUGFUNC("ixgbe_identify_phy_82599");
2157
2158 /* Detect PHY if not unknown - returns success if already detected. */
2159 status = ixgbe_identify_phy_generic(hw);
2160 if (status != IXGBE_SUCCESS) {
2161 /* 82599 10GBASE-T requires an external PHY */
2162 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
2163 return status;
2164 else
2165 status = ixgbe_identify_module_generic(hw);
2166 }
2167
2168 /* Set PHY type none if no PHY detected */
2169 if (hw->phy.type == ixgbe_phy_unknown) {
2170 hw->phy.type = ixgbe_phy_none;
2171 return IXGBE_SUCCESS;
2172 }
2173
2174 /* Return error if SFP module has been detected but is not supported */
2175 if (hw->phy.type == ixgbe_phy_sfp_unsupported)
2176 return IXGBE_ERR_SFP_NOT_SUPPORTED;
2177
2178 return status;
2179 }
2180
2181 /**
2182 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
2183 * @hw: pointer to hardware structure
2184 *
2185 * Determines physical layer capabilities of the current configuration.
2186 **/
2187 u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
2188 {
2189 u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
2190 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2191 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2192 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
2193 u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
2194 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
2195 u16 ext_ability = 0;
2196
2197 DEBUGFUNC("ixgbe_get_supported_physical_layer_82599");
2198
2199 hw->phy.ops.identify(hw);
2200
2201 switch (hw->phy.type) {
2202 case ixgbe_phy_tn:
2203 case ixgbe_phy_cu_unknown:
2204 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
2205 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
2206 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
2207 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
2208 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
2209 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
2210 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
2211 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
2212 goto out;
2213 default:
2214 break;
2215 }
2216
2217 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
2218 case IXGBE_AUTOC_LMS_1G_AN:
2219 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
2220 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
2221 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
2222 IXGBE_PHYSICAL_LAYER_1000BASE_BX;
2223 goto out;
2224 } else
2225 /* SFI mode so read SFP module */
2226 goto sfp_check;
2227 break;
2228 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
2229 if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
2230 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
2231 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
2232 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2233 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
2234 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
2235 goto out;
2236 break;
2237 case IXGBE_AUTOC_LMS_10G_SERIAL:
2238 if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
2239 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2240 goto out;
2241 } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
2242 goto sfp_check;
2243 break;
2244 case IXGBE_AUTOC_LMS_KX4_KX_KR:
2245 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
2246 if (autoc & IXGBE_AUTOC_KX_SUPP)
2247 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2248 if (autoc & IXGBE_AUTOC_KX4_SUPP)
2249 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2250 if (autoc & IXGBE_AUTOC_KR_SUPP)
2251 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2252 goto out;
2253 break;
2254 default:
2255 goto out;
2256 break;
2257 }
2258
2259 sfp_check:
2260 /* SFP check must be done last since DA modules are sometimes used to
2261 * test KR mode - we need to id KR mode correctly before SFP module.
2262 * Call identify_sfp because the pluggable module may have changed */
2263 physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
2264 out:
2265 return physical_layer;
2266 }
2267
2268 /**
2269 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2270 * @hw: pointer to hardware structure
2271 * @regval: register value to write to RXCTRL
2272 *
2273 * Enables the Rx DMA unit for 82599
2274 **/
2275 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2276 {
2277
2278 DEBUGFUNC("ixgbe_enable_rx_dma_82599");
2279
2280 /*
2281 * Workaround for 82599 silicon errata when enabling the Rx datapath.
2282 * If traffic is incoming before we enable the Rx unit, it could hang
2283 * the Rx DMA unit. Therefore, make sure the security engine is
2284 * completely disabled prior to enabling the Rx unit.
2285 */
2286
2287 hw->mac.ops.disable_sec_rx_path(hw);
2288
2289 if (regval & IXGBE_RXCTRL_RXEN)
2290 ixgbe_enable_rx(hw);
2291 else
2292 ixgbe_disable_rx(hw);
2293
2294 hw->mac.ops.enable_sec_rx_path(hw);
2295
2296 return IXGBE_SUCCESS;
2297 }
2298
2299 /**
2300 * ixgbe_verify_fw_version_82599 - verify FW version for 82599
2301 * @hw: pointer to hardware structure
2302 *
2303 * Verifies that the installed firmware version is 0.6 or higher
2304 * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2305 *
2306 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2307 * if the FW version is not supported.
2308 **/
2309 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2310 {
2311 s32 status = IXGBE_ERR_EEPROM_VERSION;
2312 u16 fw_offset, fw_ptp_cfg_offset;
2313 u16 fw_version;
2314
2315 DEBUGFUNC("ixgbe_verify_fw_version_82599");
2316
2317 /* firmware check is only necessary for SFI devices */
2318 if (hw->phy.media_type != ixgbe_media_type_fiber) {
2319 status = IXGBE_SUCCESS;
2320 goto fw_version_out;
2321 }
2322
2323 /* get the offset to the Firmware Module block */
2324 if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
2325 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2326 "eeprom read at offset %d failed", IXGBE_FW_PTR);
2327 return IXGBE_ERR_EEPROM_VERSION;
2328 }
2329
2330 if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2331 goto fw_version_out;
2332
2333 /* get the offset to the Pass Through Patch Configuration block */
2334 if (hw->eeprom.ops.read(hw, (fw_offset +
2335 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2336 &fw_ptp_cfg_offset)) {
2337 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2338 "eeprom read at offset %d failed",
2339 fw_offset +
2340 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR);
2341 return IXGBE_ERR_EEPROM_VERSION;
2342 }
2343
2344 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2345 goto fw_version_out;
2346
2347 /* get the firmware version */
2348 if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2349 IXGBE_FW_PATCH_VERSION_4), &fw_version)) {
2350 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2351 "eeprom read at offset %d failed",
2352 fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4);
2353 return IXGBE_ERR_EEPROM_VERSION;
2354 }
2355
2356 if (fw_version > 0x5)
2357 status = IXGBE_SUCCESS;
2358
2359 fw_version_out:
2360 return status;
2361 }
2362
2363 /**
2364 * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
2365 * @hw: pointer to hardware structure
2366 *
2367 * Returns TRUE if the LESM FW module is present and enabled. Otherwise
2368 * returns FALSE. Smart Speed must be disabled if LESM FW module is enabled.
2369 **/
2370 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
2371 {
2372 bool lesm_enabled = FALSE;
2373 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
2374 s32 status;
2375
2376 DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
2377
2378 /* get the offset to the Firmware Module block */
2379 status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2380
2381 if ((status != IXGBE_SUCCESS) ||
2382 (fw_offset == 0) || (fw_offset == 0xFFFF))
2383 goto out;
2384
2385 /* get the offset to the LESM Parameters block */
2386 status = hw->eeprom.ops.read(hw, (fw_offset +
2387 IXGBE_FW_LESM_PARAMETERS_PTR),
2388 &fw_lesm_param_offset);
2389
2390 if ((status != IXGBE_SUCCESS) ||
2391 (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
2392 goto out;
2393
2394 /* get the LESM state word */
2395 status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
2396 IXGBE_FW_LESM_STATE_1),
2397 &fw_lesm_state);
2398
2399 if ((status == IXGBE_SUCCESS) &&
2400 (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
2401 lesm_enabled = TRUE;
2402
2403 out:
2404 return lesm_enabled;
2405 }
2406
2407 /**
2408 * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
2409 * fastest available method
2410 *
2411 * @hw: pointer to hardware structure
2412 * @offset: offset of word in EEPROM to read
2413 * @words: number of words
2414 * @data: word(s) read from the EEPROM
2415 *
2416 * Retrieves 16 bit word(s) read from EEPROM
2417 **/
2418 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
2419 u16 words, u16 *data)
2420 {
2421 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2422 s32 ret_val = IXGBE_ERR_CONFIG;
2423
2424 DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
2425
2426 /*
2427 * If the EEPROM is detected and can be addressed using 14 bits,
2428 * use EERD; otherwise use bit bang
2429 */
2430 if ((eeprom->type == ixgbe_eeprom_spi) &&
2431 (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
2432 ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
2433 data);
2434 else
2435 ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
2436 words,
2437 data);
2438
2439 return ret_val;
2440 }
2441
2442 /**
2443 * ixgbe_read_eeprom_82599 - Read EEPROM word using
2444 * fastest available method
2445 *
2446 * @hw: pointer to hardware structure
2447 * @offset: offset of word in the EEPROM to read
2448 * @data: word read from the EEPROM
2449 *
2450 * Reads a 16 bit word from the EEPROM
2451 **/
2452 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2453 u16 offset, u16 *data)
2454 {
2455 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2456 s32 ret_val = IXGBE_ERR_CONFIG;
2457
2458 DEBUGFUNC("ixgbe_read_eeprom_82599");
2459
2460 /*
2461 * If the EEPROM is detected and can be addressed using 14 bits,
2462 * use EERD; otherwise use bit bang
2463 */
2464 if ((eeprom->type == ixgbe_eeprom_spi) &&
2465 (offset <= IXGBE_EERD_MAX_ADDR))
2466 ret_val = ixgbe_read_eerd_generic(hw, offset, data);
2467 else
2468 ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
2469
2470 return ret_val;
2471 }
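/*
 * Both EEPROM read paths above choose between the hardware-assisted
 * EERD register interface, which can only address offsets up to
 * IXGBE_EERD_MAX_ADDR, and the slower software bit-bang access to the
 * SPI EEPROM for offsets beyond that limit or for non-SPI parts.
 */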
2472
2473 /**
2474 * ixgbe_reset_pipeline_82599 - perform pipeline reset
2475 *
2476 * @hw: pointer to hardware structure
2477 *
2478 * Reset pipeline by asserting Restart_AN together with LMS change to ensure
2479 * full pipeline reset. This function assumes the SW/FW lock is held.
2480 **/
2481 s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
2482 {
2483 s32 ret_val;
2484 u32 anlp1_reg = 0;
2485 u32 i, autoc_reg, autoc2_reg;
2486
2487 /* Enable link if disabled in NVM */
2488 autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2489 if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
2490 autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
2491 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
2492 IXGBE_WRITE_FLUSH(hw);
2493 }
2494
2495 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2496 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2497 /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
2498 IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
2499 autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
2500 /* Wait for AN to leave state 0 */
2501 for (i = 0; i < 10; i++) {
2502 msec_delay(4);
2503 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2504 if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
2505 break;
2506 }
2507
2508 if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
2509 DEBUGOUT("auto negotiation not completed\n");
2510 ret_val = IXGBE_ERR_RESET_FAILED;
2511 goto reset_pipeline_out;
2512 }
2513
2514 ret_val = IXGBE_SUCCESS;
2515
2516 reset_pipeline_out:
2517 /* Write AUTOC register with original LMS field and Restart_AN */
2518 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2519 IXGBE_WRITE_FLUSH(hw);
2520
2521 return ret_val;
2522 }
2523
2524 /**
2525 * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
2526 * @hw: pointer to hardware structure
2527 * @byte_offset: byte offset to read
2528 * @dev_addr: address to read from
2529 * @data: value read
2530 *
2531 * Performs byte read operation to SFP module's EEPROM over I2C interface at
2532 * a specified device address.
2533 **/
2534 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
2535 u8 dev_addr, u8 *data)
2536 {
2537 u32 esdp;
2538 s32 status;
2539 s32 timeout = 200;
2540
2541 DEBUGFUNC("ixgbe_read_i2c_byte_82599");
2542
2543 if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2544 /* Acquire I2C bus ownership. */
2545 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2546 esdp |= IXGBE_ESDP_SDP0;
2547 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2548 IXGBE_WRITE_FLUSH(hw);
2549
2550 while (timeout) {
2551 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2552 if (esdp & IXGBE_ESDP_SDP1)
2553 break;
2554
2555 msec_delay(5);
2556 timeout--;
2557 }
2558
2559 if (!timeout) {
2560 DEBUGOUT("Driver can't access resource;"
2561 " timed out acquiring the I2C bus.\n");
2562 status = IXGBE_ERR_I2C;
2563 goto release_i2c_access;
2564 }
2565 }
2566
2567 status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
2568
2569 release_i2c_access:
2570
2571 if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2572 /* Release I2C bus ownership. */
2573 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2574 esdp &= ~IXGBE_ESDP_SDP0;
2575 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2576 IXGBE_WRITE_FLUSH(hw);
2577 }
2578
2579 return status;
2580 }
2581
2582 /**
2583 * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
2584 * @hw: pointer to hardware structure
2585 * @byte_offset: byte offset to write
2586 * @dev_addr: address to write to
2587 * @data: value to write
2588 *
2589 * Performs byte write operation to SFP module's EEPROM over I2C interface at
2590 * a specified device address.
2591 **/
2592 static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
2593 u8 dev_addr, u8 data)
2594 {
2595 u32 esdp;
2596 s32 status;
2597 s32 timeout = 200;
2598
2599 DEBUGFUNC("ixgbe_write_i2c_byte_82599");
2600
2601 if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2602 /* Acquire I2C bus ownership. */
2603 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2604 esdp |= IXGBE_ESDP_SDP0;
2605 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2606 IXGBE_WRITE_FLUSH(hw);
2607
2608 while (timeout) {
2609 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2610 if (esdp & IXGBE_ESDP_SDP1)
2611 break;
2612
2613 msec_delay(5);
2614 timeout--;
2615 }
2616
2617 if (!timeout) {
2618 DEBUGOUT("Driver can't access resource;"
2619 " timed out acquiring the I2C bus.\n");
2620 status = IXGBE_ERR_I2C;
2621 goto release_i2c_access;
2622 }
2623 }
2624
2625 status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
2626
2627 release_i2c_access:
2628
2629 if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2630 /* Release I2C bus ownership. */
2631 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2632 esdp &= ~IXGBE_ESDP_SDP0;
2633 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2634 IXGBE_WRITE_FLUSH(hw);
2635 }
2636
2637 return status;
2638 }
2639