1 /* 2 * Copyright (c) 2017-2018 Cavium, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 
 *
 */

#ifndef __ECORE_DEV_API_H__
#define __ECORE_DEV_API_H__

#include "ecore_status.h"
#include "ecore_chain.h"
#include "ecore_int_api.h"

#define ECORE_DEFAULT_ILT_PAGE_SIZE 4

/* Buffer for retrieving Wake-on-LAN (magic packet) information */
struct ecore_wake_info {
	u32 wk_info;
	u32 wk_details;
	u32 wk_pkt_len;
	u8 wk_buffer[256];
};

/**
 * @brief ecore_init_dp - initialize the debug level
 *
 * @param p_dev
 * @param dp_module
 * @param dp_level
 * @param dp_ctx
 */
void ecore_init_dp(struct ecore_dev *p_dev,
		   u32 dp_module,
		   u8 dp_level,
		   void *dp_ctx);

/**
 * @brief ecore_init_struct - initialize the device structure to
 *        its defaults
 *
 * @param p_dev
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev);

/**
 * @brief ecore_resc_free - free device resources
 *
 * @param p_dev
 */
void ecore_resc_free(struct ecore_dev *p_dev);

/**
 * @brief ecore_resc_alloc - allocate device resources
 *
 * @param p_dev
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);

/**
 * @brief ecore_resc_setup - setup device resources
 *
 * @param p_dev
 */
void ecore_resc_setup(struct ecore_dev *p_dev);

/* Action to take when the MFW doesn't support the requested load timeout */
enum ecore_mfw_timeout_fallback {
	ECORE_TO_FALLBACK_TO_NONE,
	ECORE_TO_FALLBACK_TO_DEFAULT,
	ECORE_TO_FALLBACK_FAIL_LOAD,
};

enum ecore_override_force_load {
	ECORE_OVERRIDE_FORCE_LOAD_NONE,
	ECORE_OVERRIDE_FORCE_LOAD_ALWAYS,
	ECORE_OVERRIDE_FORCE_LOAD_NEVER,
};

struct ecore_drv_load_params {
	/* Indicates whether the driver is running over a crash kernel.
	 * As part of the load request, this will be used for providing the
	 * driver role to the MFW.
	 * In case of a crash kernel over PDA - this should be set to false.
	 */
	bool is_crash_kernel;

	/* The timeout value that the MFW should use when locking the engine for
	 * the driver load process.
	 * A value of '0' means the default value, and '255' means no timeout.
	 */
	u8 mfw_timeout_val;
#define ECORE_LOAD_REQ_LOCK_TO_DEFAULT	0
#define ECORE_LOAD_REQ_LOCK_TO_NONE	255

	/* Action to take in case the MFW doesn't support timeout values other
	 * than default and none.
	 */
	enum ecore_mfw_timeout_fallback mfw_timeout_fallback;

	/* Avoid engine reset when first PF loads on it */
	bool avoid_eng_reset;

	/* Allow overriding the default force load behavior */
	enum ecore_override_force_load override_force_load;
};

struct ecore_hw_init_params {
	/* Tunneling parameters */
	struct ecore_tunnel_info *p_tunn;

	bool b_hw_start;

	/* Interrupt mode [msix, inta, etc.] to use */
	enum ecore_int_mode int_mode;

	/* NPAR tx switching to be used for vports configured for tx-switching */
	bool allow_npar_tx_switch;

	/* PCI relax ordering to be configured by MFW or ecore client */
	enum ecore_pci_rlx_odr pci_rlx_odr_mode;

	/* Binary fw data pointer in binary fw file */
	const u8 *bin_fw_data;

	/* Driver load parameters */
	struct ecore_drv_load_params *p_drv_load_params;

	/* Avoid engine affinity for RoCE/storage in case of CMT mode */
	bool avoid_eng_affin;
};

/**
 * @brief ecore_hw_init - initialize the HW
 *
 * @param p_dev
 * @param p_params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
				   struct ecore_hw_init_params *p_params);

/**
 * @brief ecore_hw_timers_stop_all - stop the timers of all HW functions
 *
 * @param p_dev
 *
 * @return void
 */
void ecore_hw_timers_stop_all(struct ecore_dev *p_dev);

/**
 * @brief ecore_hw_stop - stop the HW
 *
 * @param p_dev
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev);

/**
 * @brief ecore_hw_stop_fastpath - should be called in case
 *        slowpath is still required for the device,
 *        but fastpath is not.
 *
 * @param p_dev
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev);

#ifndef LINUX_REMOVE
/**
 * @brief ecore_hw_hibernate_prepare - should be called when
 *        the system is going into the hibernate state
 *
 * @param p_dev
 *
 */
void ecore_hw_hibernate_prepare(struct ecore_dev *p_dev);

/**
 * @brief ecore_hw_hibernate_resume - should be called when the system is
 *        resuming from D3 power state and before calling ecore_hw_init.
 *
 * @param p_dev
 *
 */
void ecore_hw_hibernate_resume(struct ecore_dev *p_dev);

#endif

/**
 * @brief ecore_hw_start_fastpath - restart fastpath traffic,
 *        only if hw_stop_fastpath was called
 *
 * @param p_hwfn
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);

enum ecore_hw_prepare_result {
	ECORE_HW_PREPARE_SUCCESS,

	/* FAILED results indicate probe has failed & cleaned up */
	ECORE_HW_PREPARE_FAILED_ENG2,
	ECORE_HW_PREPARE_FAILED_ME,
	ECORE_HW_PREPARE_FAILED_MEM,
	ECORE_HW_PREPARE_FAILED_DEV,
	ECORE_HW_PREPARE_FAILED_NVM,

	/* BAD results indicate probe is passed even though some wrongness
	 * has occurred; Trying to actually use [I.e., hw_init()] might have
	 * dire repercussions.
	 */
	ECORE_HW_PREPARE_BAD_IOV,
	ECORE_HW_PREPARE_BAD_MCP,
	ECORE_HW_PREPARE_BAD_IGU,
};

struct ecore_hw_prepare_params {
	/* Personality to initialize */
	int personality;

	/* Force the driver's default resource allocation */
	bool drv_resc_alloc;

	/* Check the reg_fifo after any register access */
	bool chk_reg_fifo;

	/* Request the MFW to initiate PF FLR */
	bool initiate_pf_flr;

	/* The OS Epoch time in seconds */
	u32 epoch;

	/* Allow the MFW to collect a crash dump */
	bool allow_mdump;

	/* Allow prepare to pass even if some initializations are failing.
	 * If set, the `p_relaxed_res' field would be set with the return,
	 * and might allow probe to pass even if there are certain issues.
	 */
	bool b_relaxed_probe;
	enum ecore_hw_prepare_result p_relaxed_res;
};

/**
 * @brief ecore_hw_prepare - prepare the HW for use (driver probe)
 *
 * @param p_dev
 * @param p_params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
				      struct ecore_hw_prepare_params *p_params);

/**
 * @brief ecore_hw_remove - undo the effects of ecore_hw_prepare
 *
 * @param p_dev
 */
void ecore_hw_remove(struct ecore_dev *p_dev);

/**
 * @brief ecore_set_nwuf_reg - configure a Wake-on-LAN pattern register
 *
 * @param p_dev
 * @param reg_idx - Index of the pattern register
 * @param pattern_size - size of pattern
 * @param crc - CRC value of pattern & mask
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_set_nwuf_reg(struct ecore_dev *p_dev,
					u32 reg_idx, u32 pattern_size, u32 crc);

/**
 * @brief ecore_get_wake_info - get magic packet buffer
 *
 * @param p_hwfn
 * @param p_ptt
 * @param wake_info - pointer to ecore_wake_info buffer
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_get_wake_info(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 struct ecore_wake_info *wake_info);

/**
 * @brief ecore_wol_buffer_clear - Clear magic packet buffer
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return void
 */
void ecore_wol_buffer_clear(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt);

/**
 * @brief ecore_ptt_acquire - Allocate a PTT window
 *
 * Should be called at the entry point to the driver (at the beginning of an
 * exported function)
 *
 * @param p_hwfn
 *
 * @return struct ecore_ptt
 */
struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn);

/**
 * @brief ecore_ptt_release - Release PTT Window
 *
 * Should be called at the end of a flow - at the end of the function that
 * acquired the PTT.
 *
 *
 * @param p_hwfn
 * @param p_ptt
 */
void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt);

/**
 * @brief ecore_get_dev_name - get device name, e.g., "BB B0"
 *
 * @param p_dev
 * @param name - this is where the name will be written to
 * @param max_chars - maximum chars that can be written to name including '\0'
 */
void ecore_get_dev_name(struct ecore_dev *p_dev,
			u8 *name,
			u8 max_chars);

#ifndef __EXTRACT__LINUX__IF__
struct ecore_eth_stats_common {
	u64 no_buff_discards;
	u64 packet_too_big_discard;
	u64 ttl0_discard;
	u64 rx_ucast_bytes;
	u64 rx_mcast_bytes;
	u64 rx_bcast_bytes;
	u64 rx_ucast_pkts;
	u64 rx_mcast_pkts;
	u64 rx_bcast_pkts;
	u64 mftag_filter_discards;
	u64 mac_filter_discards;
	u64 tx_ucast_bytes;
	u64 tx_mcast_bytes;
	u64 tx_bcast_bytes;
	u64 tx_ucast_pkts;
	u64 tx_mcast_pkts;
	u64 tx_bcast_pkts;
	u64 tx_err_drop_pkts;
	u64 tpa_coalesced_pkts;
	u64 tpa_coalesced_events;
	u64 tpa_aborts_num;
	u64 tpa_not_coalesced_pkts;
	u64 tpa_coalesced_bytes;

	/* port */
	u64 rx_64_byte_packets;
	u64 rx_65_to_127_byte_packets;
	u64 rx_128_to_255_byte_packets;
	u64 rx_256_to_511_byte_packets;
	u64 rx_512_to_1023_byte_packets;
	u64 rx_1024_to_1518_byte_packets;
	u64 rx_crc_errors;
	u64 rx_mac_crtl_frames;
	u64 rx_pause_frames;
	u64 rx_pfc_frames;
	u64 rx_align_errors;
	u64 rx_carrier_errors;
	u64 rx_oversize_packets;
	u64 rx_jabbers;
	u64 rx_undersize_packets;
	u64 rx_fragments;
	u64 tx_64_byte_packets;
	u64 tx_65_to_127_byte_packets;
	u64 tx_128_to_255_byte_packets;
	u64 tx_256_to_511_byte_packets;
	u64 tx_512_to_1023_byte_packets;
	u64 tx_1024_to_1518_byte_packets;
	u64 tx_pause_frames;
	u64 tx_pfc_frames;
	u64 brb_truncates;
	u64 brb_discards;
	u64 rx_mac_bytes;
	u64 rx_mac_uc_packets;
	u64 rx_mac_mc_packets;
	u64 rx_mac_bc_packets;
	u64 rx_mac_frames_ok;
	u64 tx_mac_bytes;
	u64 tx_mac_uc_packets;
	u64 tx_mac_mc_packets;
	u64 tx_mac_bc_packets;
	u64 tx_mac_ctrl_frames;
	u64 link_change_count;
};

/* Statistics specific to BB-based devices */
struct ecore_eth_stats_bb {
	u64 rx_1519_to_1522_byte_packets;
	u64 rx_1519_to_2047_byte_packets;
	u64 rx_2048_to_4095_byte_packets;
	u64 rx_4096_to_9216_byte_packets;
	u64 rx_9217_to_16383_byte_packets;
	u64 tx_1519_to_2047_byte_packets;
	u64 tx_2048_to_4095_byte_packets;
	u64 tx_4096_to_9216_byte_packets;
	u64 tx_9217_to_16383_byte_packets;
	u64 tx_lpi_entry_count;
	u64 tx_total_collisions;
};

/* Statistics specific to AH-based devices */
struct ecore_eth_stats_ah {
	u64 rx_1519_to_max_byte_packets;
	u64 tx_1519_to_max_byte_packets;
};

struct ecore_eth_stats {
	struct ecore_eth_stats_common common;
	union {
		struct ecore_eth_stats_bb bb;
		struct ecore_eth_stats_ah ah;
	};
};
#endif

enum ecore_dmae_address_type_t {
	ECORE_DMAE_ADDRESS_HOST_VIRT,
	ECORE_DMAE_ADDRESS_HOST_PHYS,
	ECORE_DMAE_ADDRESS_GRC
};

/* values of the flags field. If the ECORE_DMAE_FLAG_RW_REPL_SRC flag is set
 * and the source is a block of length DMAE_MAX_RW_SIZE and the destination
 * is larger, the source block will be duplicated as many times as required
 * to fill the destination block. This is used mostly to write a zeroed
 * buffer to destination address using DMA
 */
#define ECORE_DMAE_FLAG_RW_REPL_SRC	0x00000001
#define ECORE_DMAE_FLAG_VF_SRC		0x00000002
#define ECORE_DMAE_FLAG_VF_DST		0x00000004
#define ECORE_DMAE_FLAG_COMPLETION_DST	0x00000008
#define ECORE_DMAE_FLAG_PORT		0x00000010
#define ECORE_DMAE_FLAG_PF_SRC		0x00000020
#define ECORE_DMAE_FLAG_PF_DST		0x00000040

struct ecore_dmae_params {
	u32 flags; /* consists of ECORE_DMAE_FLAG_* values */
	u8 src_vfid;
	u8 dst_vfid;
	u8 port_id;
	u8 src_pfid;
	u8 dst_pfid;
};

/**
 * @brief ecore_dmae_host2grc - copy data from source addr to
 *        dmae registers using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_addr
 * @param grc_addr (dmae_data_offset)
 * @param size_in_dwords
 * @param p_params (default parameters will be used in case of OSAL_NULL)
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt,
		    u64 source_addr,
		    u32 grc_addr,
		    u32 size_in_dwords,
		    struct ecore_dmae_params *p_params);

/**
 * @brief ecore_dmae_grc2host - Read data from dmae data offset
 *        to source address using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param grc_addr (dmae_data_offset)
 * @param dest_addr
 * @param size_in_dwords
 * @param p_params (default parameters will be used in case of OSAL_NULL)
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt,
		    u32 grc_addr,
		    dma_addr_t dest_addr,
		    u32 size_in_dwords,
		    struct ecore_dmae_params *p_params);

/**
 * @brief ecore_dmae_host2host - copy data from a source address
 *        to a destination address (for SRIOV) using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_addr
 * @param dest_addr
 * @param size_in_dwords
 * @param p_params (default parameters will be used in case of OSAL_NULL)
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
		     struct ecore_ptt *p_ptt,
		     dma_addr_t source_addr,
		     dma_addr_t dest_addr,
		     u32 size_in_dwords,
		     struct ecore_dmae_params *p_params);

/**
 * @brief ecore_chain_alloc - Allocate and initialize a chain
 *
 * @param p_dev
 * @param intended_use
 * @param mode
 * @param cnt_type
 * @param num_elems
 * @param elem_size
 * @param p_chain
 * @param ext_pbl
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_chain_alloc(struct ecore_dev *p_dev,
		  enum ecore_chain_use_mode intended_use,
		  enum ecore_chain_mode mode,
		  enum ecore_chain_cnt_type cnt_type,
		  u32 num_elems,
		  osal_size_t elem_size,
		  struct ecore_chain *p_chain,
		  struct ecore_chain_ext_pbl *ext_pbl);

/**
 * @brief ecore_chain_free - Free chain DMA memory
 *
 * @param p_dev
 * @param p_chain
 */
void ecore_chain_free(struct ecore_dev *p_dev,
		      struct ecore_chain *p_chain);

/**
 * @brief ecore_fw_l2_queue - Get absolute L2 queue ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
				       u16 src_id,
				       u16 *dst_id);

/**
 * @brief ecore_fw_vport - Get absolute vport ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
				    u8 src_id,
				    u8 *dst_id);

/**
 * @brief ecore_fw_rss_eng - Get absolute RSS engine ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
				      u8 src_id,
				      u8 *dst_id);

/**
 * @brief ecore_llh_get_num_ppfid - Return the allocated number of LLH filter
 *        banks that are allocated to the PF.
 *
 * @param p_dev
 *
 * @return u8 - Number of LLH filter banks
 */
u8 ecore_llh_get_num_ppfid(struct ecore_dev *p_dev);

enum ecore_eng {
	ECORE_ENG0,
	ECORE_ENG1,
	ECORE_BOTH_ENG,
};

/**
 * @brief ecore_llh_get_l2_affinity_hint - Return the hint for the L2 affinity
 *
 * @param p_dev
 *
 * @return enum ecore_eng - L2 affinity hint
 */
enum ecore_eng ecore_llh_get_l2_affinity_hint(struct ecore_dev *p_dev);

/**
 * @brief ecore_llh_set_ppfid_affinity - Set the engine affinity for the given
 *        LLH filter bank.
 *
 * @param p_dev
 * @param ppfid - relative within the allocated ppfids ('0' is the default one).
 * @param eng
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_llh_set_ppfid_affinity(struct ecore_dev *p_dev,
						  u8 ppfid, enum ecore_eng eng);

/**
 * @brief ecore_llh_set_roce_affinity - Set the RoCE engine affinity
 *
 * @param p_dev
 * @param eng
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_llh_set_roce_affinity(struct ecore_dev *p_dev,
						 enum ecore_eng eng);

/**
 * @brief ecore_llh_add_mac_filter - Add a LLH MAC filter into the given filter
 *        bank.
 *
 * @param p_dev
 * @param ppfid - relative within the allocated ppfids ('0' is the default one).
 * @param mac_addr - MAC to add
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
					      u8 mac_addr[ETH_ALEN]);

/**
 * @brief ecore_llh_remove_mac_filter - Remove a LLH MAC filter from the given
 *        filter bank.
 *
 * @param p_dev
 * @param ppfid - relative within the allocated ppfids ('0' is the default one).
 * @param mac_addr - MAC to remove
 */
void ecore_llh_remove_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
				 u8 mac_addr[ETH_ALEN]);

enum ecore_llh_prot_filter_type_t {
	ECORE_LLH_FILTER_ETHERTYPE,
	ECORE_LLH_FILTER_TCP_SRC_PORT,
	ECORE_LLH_FILTER_TCP_DEST_PORT,
	ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT,
	ECORE_LLH_FILTER_UDP_SRC_PORT,
	ECORE_LLH_FILTER_UDP_DEST_PORT,
	ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT
};

/**
 * @brief ecore_llh_add_protocol_filter - Add a LLH protocol filter into the
 *        given filter bank.
 *
 * @param p_dev
 * @param ppfid - relative within the allocated ppfids ('0' is the default one).
 * @param type - type of filters and comparing
 * @param source_port_or_eth_type - source port or ethertype to add
 * @param dest_port - destination port to add
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_llh_add_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
			      enum ecore_llh_prot_filter_type_t type,
			      u16 source_port_or_eth_type, u16 dest_port);

/**
 * @brief ecore_llh_remove_protocol_filter - Remove a LLH protocol filter from
 *        the given filter bank.
 *
 * @param p_dev
 * @param ppfid - relative within the allocated ppfids ('0' is the default one).
 * @param type - type of filters and comparing
 * @param source_port_or_eth_type - source port or ethertype to remove
 * @param dest_port - destination port to remove
 */
void ecore_llh_remove_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
				      enum ecore_llh_prot_filter_type_t type,
				      u16 source_port_or_eth_type,
				      u16 dest_port);

/**
 * @brief ecore_llh_clear_ppfid_filters - Remove all LLH filters from the given
 *        filter bank.
 *
 * @param p_dev
 * @param ppfid - relative within the allocated ppfids ('0' is the default one).
 */
void ecore_llh_clear_ppfid_filters(struct ecore_dev *p_dev, u8 ppfid);

/**
 * @brief ecore_llh_clear_all_filters - Remove all LLH filters
 *
 * @param p_dev
 */
void ecore_llh_clear_all_filters(struct ecore_dev *p_dev);

/**
 * @brief ecore_llh_set_function_as_default - set function as default per port
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt);

/**
 * @brief Cleanup of previous driver remains prior to load
 *
 * @param p_hwfn
 * @param p_ptt
 * @param id - For PF, engine-relative. For VF, PF-relative.
 * @param is_vf - true iff cleanup is made for a VF.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 id,
					 bool is_vf);

/**
 * @brief ecore_get_queue_coalesce - Retrieve coalesce value for a given queue.
 *
 * @param p_hwfn
 * @param coal - store coalesce value read from the hardware.
 * @param handle
 *
 * @return enum _ecore_status_t
 **/
enum _ecore_status_t
ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *coal,
			 void *handle);

/**
 * @brief ecore_set_queue_coalesce - Configure coalesce parameters for Rx and
 *        Tx queue. The fact that we can configure coalescing to up to 511, but
 *        on varying accuracy [the bigger the value the less accurate] up to a
 *        mistake of 3usec for the highest values.
 *        While the API allows setting coalescing per-qid, all queues sharing a
 *        SB should be in same range [i.e., either 0-0x7f, 0x80-0xff or
 *        0x100-0x1ff] otherwise configuration would break.
 *
 * @param p_hwfn
 * @param rx_coal - Rx Coalesce value in micro seconds.
 * @param tx_coal - TX Coalesce value in micro seconds.
 * @param p_handle
 *
 * @return enum _ecore_status_t
 **/
enum _ecore_status_t
ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal,
			 u16 tx_coal, void *p_handle);

/**
 * @brief - Recalculate feature distributions based on HW resources and
 *          user inputs. Currently this affects RDMA_CNQ, PF_L2_QUE and
 *          VF_L2_QUE. As a result, this must not be called while RDMA is
 *          active or while VFs are enabled.
 *
 * @param p_hwfn
 */
void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn);

/**
 * @brief ecore_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER
 *
 * @param p_hwfn
 * @param p_ptt
 * @param b_enable - true/false
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt,
						  bool b_enable);

#ifndef __EXTRACT__LINUX__IF__
enum ecore_db_rec_width {
	DB_REC_WIDTH_32B,
	DB_REC_WIDTH_64B,
};

enum ecore_db_rec_space {
	DB_REC_KERNEL,
	DB_REC_USER,
};
#endif

/**
 * @brief db_recovery_add - add doorbell information to the doorbell
 *        recovery mechanism.
 *
 * @param p_dev
 * @param db_addr - doorbell address
 * @param db_data - address of where db_data is stored
 * @param db_width - doorbell is 32b or 64b
 * @param db_space - doorbell recovery addresses are user or kernel space
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
					   void OSAL_IOMEM *db_addr,
					   void *db_data,
					   enum ecore_db_rec_width db_width,
					   enum ecore_db_rec_space db_space);

/**
 * @brief db_recovery_del - remove doorbell information from the doorbell
 *        recovery mechanism. db_data serves as key (db_addr is not unique).
 *
 * @param p_dev
 * @param db_addr - doorbell address
 * @param db_data - address where db_data is stored. Serves as key for the
 *                  entry to delete.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
					   void OSAL_IOMEM *db_addr,
					   void *db_data);

#ifndef __EXTRACT__LINUX__THROW__
/* Returns true iff the device's MF mode has the UFP-specific bit set */
static OSAL_INLINE bool ecore_is_mf_ufp(struct ecore_hwfn *p_hwfn)
{
	return !!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits);
}
#endif

/**
 * @brief ecore_set_dev_access_enable - Enable or disable access to the device
 *
 * @param p_dev
 * @param b_enable - true/false
 */
void ecore_set_dev_access_enable(struct ecore_dev *p_dev, bool b_enable);

/**
 * @brief ecore_set_ilt_page_size - Set ILT page size
 *
 * @param p_dev
 * @param ilt_size
 *
 * @return void
 */
void ecore_set_ilt_page_size(struct ecore_dev *p_dev, u8 ilt_size);

#endif