/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#ifndef __ECORE_VF_PF_IF_H__
#define __ECORE_VF_PF_IF_H__

/* Number of entries in the RSS indirection table. */
#define T_ETH_INDIRECTION_TABLE_SIZE 128 /* @@@ TBD MichalK this should be HSI? */
/* RSS hash key size, in 32-bit words. */
#define T_ETH_RSS_KEY_SIZE 10 /* @@@ TBD this should be HSI? */
/* Length of an Ethernet MAC address, in bytes. */
#define ETH_ALEN 6 /* @@@ TBD MichalK - should this be defined here?*/

/***********************************************
 *
 * Common definitions for all HVs
 *
 **/

/* Resource amounts a VF asks the PF for during acquisition.
 * The PF replies with the granted amounts in struct pf_vf_resc.
 */
struct vf_pf_resc_request {
	u8 num_rxqs;
	u8 num_txqs;
	u8 num_sbs;
	u8 num_mac_filters;
	u8 num_vlan_filters;
	u8 num_mc_filters; /* No limit so superfluous */
	u8 num_cids;
	u8 padding;
};

/* Description of a single status block assigned to a VF. */
struct hw_sb_info {
	u16 hw_sb_id; /* aka absolute igu id, used to ack the sb */
	u8 sb_qid; /* used to update DHC for sb */
	u8 padding[5];
};

/***********************************************
 *
 * HW VF-PF channel definitions
 *
 * A.K.A VF-PF mailbox
 *
 **/

/* Maximum size in bytes of a single request/response TLV buffer. */
#define TLV_BUFFER_SIZE 1024

/* vf pf channel tlvs */
/* general tlv header (used for both vf->pf request and pf->vf response) */
struct channel_tlv {
	u16 type;
	u16 length;
};

/* header of first vf->pf tlv carries the offset used to calculate response
 * buffer address
 */
struct vfpf_first_tlv {
	struct channel_tlv tl;
	u32 padding;
	u64 reply_address; /* physical address the PF writes the reply to */
};

/* header of pf->vf tlvs, carries the status of handling the request */
struct pfvf_tlv {
	struct channel_tlv tl;
	u8 status;
	u8 padding[3];
};

/* response tlv used for most tlvs */
struct pfvf_def_resp_tlv {
	struct pfvf_tlv hdr;
};

/* used to terminate and pad a tlv list */
struct channel_list_end_tlv {
	struct channel_tlv tl;
	u8 padding[4];
};

/* Acquire - the first request a VF sends; carries VF capabilities and
 * resource demands, and where the PF should place bulletin updates.
 */
struct vfpf_acquire_tlv {
	struct vfpf_first_tlv first_tlv;

	struct vf_pf_vfdev_info {
		/* First bit was used on 8.7.x and 8.8.x versions, which had different
		 * FWs used but with the same fastpath HSI. As this was prior to the
		 * fastpath versioning, wanted to have ability to override fw matching
		 * and allow them to interact.
		 */
#define VFPF_ACQUIRE_CAP_PRE_FP_HSI	(1 << 0) /* VF pre-FP hsi version */
#define VFPF_ACQUIRE_CAP_100G		(1 << 1) /* VF can support 100g */

		/* A requirement for supporting multi-Tx queues on a single queue-zone,
		 * VF would pass qids as additional information whenever passing queue
		 * references.
		 * TODO - due to the CID limitations in Bar0, VFs currently don't pass
		 * this, and use the legacy CID scheme.
		 */
#define VFPF_ACQUIRE_CAP_QUEUE_QIDS	(1 << 2)
		u64 capabilities; /* bitmask of VFPF_ACQUIRE_CAP_* */
		u8 fw_major;
		u8 fw_minor;
		u8 fw_revision;
		u8 fw_engineering;
		u32 driver_version;
		u16 opaque_fid; /* ME register value */
		u8 os_type; /* VFPF_ACQUIRE_OS_* value */
		u8 eth_fp_hsi_major;
		u8 eth_fp_hsi_minor;
		u8 padding[3];
	} vfdev_info;

	struct vf_pf_resc_request resc_request;

	u64 bulletin_addr; /* VF-side physical address of its bulletin board */
	u32 bulletin_size;
	u32 padding;
};

/* receive side scaling tlv */
struct vfpf_vport_update_rss_tlv {
	struct channel_tlv tl;

	u8 update_rss_flags; /* bitmask of VFPF_UPDATE_RSS_*_FLAG below */
#define VFPF_UPDATE_RSS_CONFIG_FLAG	(1 << 0)
#define VFPF_UPDATE_RSS_CAPS_FLAG	(1 << 1)
#define VFPF_UPDATE_RSS_IND_TABLE_FLAG	(1 << 2)
#define VFPF_UPDATE_RSS_KEY_FLAG	(1 << 3)

	u8 rss_enable;
	u8 rss_caps;
	u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
	u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
	u32 rss_key[T_ETH_RSS_KEY_SIZE];
};

/* Location of one storm's statistics block, as reported by the PF. */
struct pfvf_storm_stats {
	u32 address;
	u32 len;
};

/* Statistics block locations for all four storms. */
struct pfvf_stats_info {
	struct pfvf_storm_stats mstats;
	struct pfvf_storm_stats pstats;
	struct pfvf_storm_stats tstats;
	struct pfvf_storm_stats ustats;
};

/* acquire response tlv - carries the allocated resources */
struct pfvf_acquire_resp_tlv {
	struct pfvf_tlv hdr;

	struct pf_vf_pfdev_info {
		u32 chip_num;
		u32 mfw_ver;

		u16 fw_major;
		u16 fw_minor;
		u16 fw_rev;
		u16 fw_eng;

		u64 capabilities; /* bitmask of PFVF_ACQUIRE_CAP_* */
#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED	(1 << 0)
#define PFVF_ACQUIRE_CAP_100G			(1 << 1) /* If set, 100g PF */
/* There are old PF versions where the PF might mistakenly override the sanity
 * mechanism [version-based] and allow a VF that can't be supported to pass
 * the acquisition phase.
 * To overcome this, PFs now indicate that they're past that point and the new
 * VFs would fail probe on the older PFs that fail to do so.
 */
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE	(1 << 2)

/* PF expects queues to be received with additional qids */
#define PFVF_ACQUIRE_CAP_QUEUE_QIDS		(1 << 3)

		u16 db_size;
		u8 indices_per_sb;
		u8 os_type;

		/* These should match the PF's ecore_dev values */
		u16 chip_rev;
		u8 dev_type;

		u8 padding;

		struct pfvf_stats_info stats_info;

		u8 port_mac[ETH_ALEN];

		/* It's possible PF had to configure an older fastpath HSI
		 * [in case VF is newer than PF]. This is communicated back
		 * to the VF. It can also be used in case of error due to
		 * non-matching versions to shed light in VF about failure.
		 */
		u8 major_fp_hsi;
		u8 minor_fp_hsi;
	} pfdev_info;

	struct pf_vf_resc {
		/* in case of status NO_RESOURCE in message hdr, pf will fill
		 * this struct with suggested amount of resources for next
		 * acquire request
		 */
#define PFVF_MAX_QUEUES_PER_VF	16
#define PFVF_MAX_SBS_PER_VF	16
		struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
		u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
		u8 cid[PFVF_MAX_QUEUES_PER_VF];

		u8 num_rxqs;
		u8 num_txqs;
		u8 num_sbs;
		u8 num_mac_filters;
		u8 num_vlan_filters;
		u8 num_mc_filters;
		u8 num_cids;
		u8 padding;
	} resc;

	u32 bulletin_size;
	u32 padding;
};

/* Response to a queue-start request. */
struct pfvf_start_queue_resp_tlv {
	struct pfvf_tlv hdr;
	u32 offset; /* offset to consumer/producer of queue */
	u8 padding[4];
};

/* Extended queue information - additional index for reference inside qzone.
 * If communicated between VF/PF, each TLV relating to queues should be
 * extended by one such [or have a future base TLV that already contains info].
 */
struct vfpf_qid_tlv {
	struct channel_tlv tl;
	u8 qid;
	u8 padding[3];
};

/* Setup Queue */
struct vfpf_start_rxq_tlv {
	struct vfpf_first_tlv first_tlv;

	/* physical addresses */
	u64 rxq_addr;
	u64 deprecated_sge_addr;
	u64 cqe_pbl_addr;

	u16 cqe_pbl_size;
	u16 hw_sb;
	u16 rx_qid;
	u16 hc_rate; /* desired interrupts per sec. */

	u16 bd_max_bytes;
	u16 stat_id;
	u8 sb_index;
	u8 padding[3];

};

/* Request to start a Tx queue. */
struct vfpf_start_txq_tlv {
	struct vfpf_first_tlv first_tlv;

	/* physical addresses */
	u64 pbl_addr;
	u16 pbl_size;
	u16 stat_id;
	u16 tx_qid;
	u16 hw_sb;

	u32 flags; /* VFPF_QUEUE_FLG_X flags */
	u16 hc_rate; /* desired interrupts per sec. */
	u8 sb_index;
	u8 padding[3];
};

/* Stop RX Queue */
struct vfpf_stop_rxqs_tlv {
	struct vfpf_first_tlv first_tlv;

	u16 rx_qid;

	/* While the API supports multiple Rx-queues on a single TLV
	 * message, in practice older VFs always used it as one [ecore].
	 * And there are PFs [starting with the CHANNEL_TLV_QID] which
	 * would start assuming this is always a '1'. So in practice this
	 * field should be considered deprecated and *Always* set to '1'.
	 */
	u8 num_rxqs;

	u8 cqe_completion;
	u8 padding[4];
};

/* Stop TX Queues */
struct vfpf_stop_txqs_tlv {
	struct vfpf_first_tlv first_tlv;

	u16 tx_qid;

	/* While the API supports multiple Tx-queues on a single TLV
	 * message, in practice older VFs always used it as one [ecore].
	 * And there are PFs [starting with the CHANNEL_TLV_QID] which
	 * would start assuming this is always a '1'. So in practice this
	 * field should be considered deprecated and *Always* set to '1'.
	 */
	u8 num_txqs;
	u8 padding[5];
};

/* Request to update previously-started Rx queues. */
struct vfpf_update_rxq_tlv {
	struct vfpf_first_tlv first_tlv;

	u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];

	u16 rx_qid;
	u8 num_rxqs;
	u8 flags; /* bitmask of VFPF_RXQ_UPD_*_FLAG below */
#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG	(1 << 0)
#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG		(1 << 1)
#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG	(1 << 2)

	u8 padding[4];
};

/* Set Queue Filters */
struct vfpf_q_mac_vlan_filter {
	u32 flags; /* bitmask of VFPF_Q_FILTER_* below */
#define VFPF_Q_FILTER_DEST_MAC_VALID	0x01
#define VFPF_Q_FILTER_VLAN_TAG_VALID	0x02
#define VFPF_Q_FILTER_SET_MAC		0x100 /* set/clear */

	u8 mac[ETH_ALEN];
	u16 vlan_tag;

	u8 padding[4];
};

/* Start a vport */
struct vfpf_vport_start_tlv {
	struct vfpf_first_tlv first_tlv;

	u64 sb_addr[PFVF_MAX_SBS_PER_VF];

	u32 tpa_mode;
	u16 dep1;
	u16 mtu;

	u8 vport_id;
	u8 inner_vlan_removal;

	u8 only_untagged;
	u8 max_buffers_per_cqe;

	u8 padding[4];
};

/* Extended tlvs - need to add rss, mcast, accept mode tlvs */

/* vport-update sub-TLV: enable/disable Rx/Tx on the vport. */
struct vfpf_vport_update_activate_tlv {
	struct channel_tlv tl;
	u8 update_rx;
	u8 update_tx;
	u8 active_rx;
	u8 active_tx;
};

/* vport-update sub-TLV: Tx switching configuration. */
struct vfpf_vport_update_tx_switch_tlv {
	struct channel_tlv tl;
	u8 tx_switching;
	u8 padding[3];
};

/* vport-update sub-TLV: inner-vlan stripping configuration. */
struct vfpf_vport_update_vlan_strip_tlv {
	struct channel_tlv tl;
	u8 remove_vlan;
	u8 padding[3];
};

/* vport-update sub-TLV: multicast approximation bins. */
struct vfpf_vport_update_mcast_bin_tlv {
	struct channel_tlv tl;
	u8 padding[4];

	u64 bins[8];
};

/* vport-update sub-TLV: Rx/Tx accept-mode filters. */
struct vfpf_vport_update_accept_param_tlv {
	struct channel_tlv tl;
	u8 update_rx_mode;
	u8 update_tx_mode;
	u8 rx_accept_filter;
	u8 tx_accept_filter;
};

/* vport-update sub-TLV: accept-any-vlan configuration. */
struct vfpf_vport_update_accept_any_vlan_tlv {
	struct channel_tlv tl;
	u8 update_accept_any_vlan_flg;
	u8 accept_any_vlan;

	u8 padding[2];
};
/* vport-update sub-TLV: SGE/TPA (aggregation) configuration. */
struct vfpf_vport_update_sge_tpa_tlv {
	struct channel_tlv tl;

	u16 sge_tpa_flags; /* bitmask of VFPF_TPA_*_FLAG below */
#define VFPF_TPA_IPV4_EN_FLAG		(1 << 0)
#define VFPF_TPA_IPV6_EN_FLAG		(1 << 1)
#define VFPF_TPA_PKT_SPLIT_FLAG		(1 << 2)
#define VFPF_TPA_HDR_DATA_SPLIT_FLAG	(1 << 3)
#define VFPF_TPA_GRO_CONSIST_FLAG	(1 << 4)

	u8 update_sge_tpa_flags; /* bitmask of VFPF_UPDATE_*_FLAG below */
#define VFPF_UPDATE_SGE_DEPRECATED_FLAG	(1 << 0)
#define VFPF_UPDATE_TPA_EN_FLAG		(1 << 1)
#define VFPF_UPDATE_TPA_PARAM_FLAG	(1 << 2)

	u8 max_buffers_per_cqe;

	u16 deprecated_sge_buff_size;
	u16 tpa_max_size;
	u16 tpa_min_size_to_start;
	u16 tpa_min_size_to_cont;

	u8 tpa_max_aggs_num;
	u8 padding[7];

};

/* Primary tlv as a header for various extended tlvs for
 * various functionalities in vport update ramrod.
 */
struct vfpf_vport_update_tlv {
	struct vfpf_first_tlv first_tlv;
};

/* Request to add/remove a unicast MAC/VLAN filter. */
struct vfpf_ucast_filter_tlv {
	struct vfpf_first_tlv first_tlv;

	u8 opcode;
	u8 type;

	u8 mac[ETH_ALEN];

	u16 vlan;
	u16 padding[3];
};

/* tunnel update param tlv */
struct vfpf_update_tunn_param_tlv {
	struct vfpf_first_tlv first_tlv;

	u8 tun_mode_update_mask;
	u8 tunn_mode;
	u8 update_tun_cls;
	u8 vxlan_clss;
	u8 l2gre_clss;
	u8 ipgre_clss;
	u8 l2geneve_clss;
	u8 ipgeneve_clss;
	u8 update_geneve_port;
	u8 update_vxlan_port;
	u16 geneve_port;
	u16 vxlan_port;
	u8 padding[2];
};

/* PF response carrying the resulting tunnel configuration. */
struct pfvf_update_tunn_param_tlv {
	struct pfvf_tlv hdr;

	u16 tunn_feature_mask;
	u8 vxlan_mode;
	u8 l2geneve_mode;
	u8 ipgeneve_mode;
	u8 l2gre_mode;
	u8 ipgre_mode;
	u8 vxlan_clss;
	u8 l2gre_clss;
	u8 ipgre_clss;
	u8 l2geneve_clss;
	u8 ipgeneve_clss;
	u16 vxlan_udp_port;
	u16 geneve_udp_port;
};

/* Pads the channel unions below to the full mailbox buffer size. */
struct tlv_buffer_size {
	u8 tlv_buffer[TLV_BUFFER_SIZE];
};

/* Request to update Rx/Tx interrupt coalescing for a queue. */
struct vfpf_update_coalesce {
	struct vfpf_first_tlv first_tlv;
	u16 rx_coal;
	u16 tx_coal;
	u16 qid;
	u8 padding[2];
};

/* All possible vf->pf request layouts, overlaid on one buffer. */
union vfpf_tlvs {
	struct vfpf_first_tlv first_tlv;
	struct vfpf_acquire_tlv acquire;
	struct vfpf_start_rxq_tlv start_rxq;
	struct vfpf_start_txq_tlv start_txq;
	struct vfpf_stop_rxqs_tlv stop_rxqs;
	struct vfpf_stop_txqs_tlv stop_txqs;
	struct vfpf_update_rxq_tlv update_rxq;
	struct vfpf_vport_start_tlv start_vport;
	struct vfpf_vport_update_tlv vport_update;
	struct vfpf_ucast_filter_tlv ucast_filter;
	struct vfpf_update_tunn_param_tlv tunn_param_update;
	struct vfpf_update_coalesce update_coalesce;
	struct tlv_buffer_size tlv_buf_size;
};

/* All possible pf->vf response layouts, overlaid on one buffer. */
union pfvf_tlvs {
	struct pfvf_def_resp_tlv default_resp;
	struct pfvf_acquire_resp_tlv acquire_resp;
	struct tlv_buffer_size tlv_buf_size;
	struct pfvf_start_queue_resp_tlv queue_start;
	struct pfvf_update_tunn_param_tlv tunn_param_resp;
};

/* This is a structure which is allocated in the VF, which the PF may update
 * when it deems it necessary to do so. The bulletin board is sampled
 * periodically by the VF. A copy per VF is maintained in the PF (to prevent
 * loss of data upon multiple updates (or the need for read modify write)).
 */

/* Bit positions inside ecore_bulletin_content.valid_bitmap. */
enum ecore_bulletin_bit {
	/* Alert the VF that a forced MAC was set by the PF */
	MAC_ADDR_FORCED = 0,

	/* The VF should not access the vfpf channel */
	VFPF_CHANNEL_INVALID = 1,

	/* Alert the VF that a forced VLAN was set by the PF */
	VLAN_ADDR_FORCED = 2,

	/* Indicate that `default_only_untagged' contains actual data */
	VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
	VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,

	/* Alert the VF that suggested mac was sent by the PF.
	 * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set
	 */
	VFPF_BULLETIN_MAC_ADDR = 5
};

/* The bulletin board contents the PF writes and the VF samples. */
struct ecore_bulletin_content {
	/* crc of structure to ensure is not in mid-update */
	u32 crc;

	u32 version;

	/* bitmap indicating which fields hold valid values */
	u64 valid_bitmap;

	/* used for MAC_ADDR or MAC_ADDR_FORCED */
	u8 mac[ETH_ALEN];

	/* If valid, 1 => only untagged Rx if no vlan is configured */
	u8 default_only_untagged;
	u8 padding;

	/* The following is a 'copy' of ecore_mcp_link_state,
	 * ecore_mcp_link_params and ecore_mcp_link_capabilities. Since it's
	 * possible the structs will increase further along the road we cannot
	 * have it here; Instead we need to have all of its fields.
	 */
	u8 req_autoneg;
	u8 req_autoneg_pause;
	u8 req_forced_rx;
	u8 req_forced_tx;
	u8 padding2[4];

	u32 req_adv_speed;
	u32 req_forced_speed;
	u32 req_loopback;
	u32 padding3;

	u8 link_up;
	u8 full_duplex;
	u8 autoneg;
	u8 autoneg_complete;
	u8 parallel_detection;
	u8 pfc_enabled;
	u8 partner_tx_flow_ctrl_en;
	u8 partner_rx_flow_ctrl_en;

	u8 partner_adv_pause;
	u8 sfp_tx_fault;
	u16 vxlan_udp_port;
	u16 geneve_udp_port;
	u8 padding4[2];

	u32 speed;
	u32 partner_adv_speed;

	u32 capability_speed;

	/* Forced vlan */
	u16 pvid;
	u16 padding5;
};

/* VF-side handle on the bulletin board buffer. */
struct ecore_bulletin {
	dma_addr_t phys; /* DMA address the PF writes to */
	struct ecore_bulletin_content *p_virt;
	u32 size;
};

/* TLV type values for the channel_tlv 'type' field. */
enum {
/*!!!!! Make sure to update STRINGS structure accordingly !!!!!*/

	CHANNEL_TLV_NONE, /* ends tlv sequence */
	CHANNEL_TLV_ACQUIRE,
	CHANNEL_TLV_VPORT_START,
	CHANNEL_TLV_VPORT_UPDATE,
	CHANNEL_TLV_VPORT_TEARDOWN,
	CHANNEL_TLV_START_RXQ,
	CHANNEL_TLV_START_TXQ,
	CHANNEL_TLV_STOP_RXQS,
	CHANNEL_TLV_STOP_TXQS,
	CHANNEL_TLV_UPDATE_RXQ,
	CHANNEL_TLV_INT_CLEANUP,
	CHANNEL_TLV_CLOSE,
	CHANNEL_TLV_RELEASE,
	CHANNEL_TLV_LIST_END,
	CHANNEL_TLV_UCAST_FILTER,
	CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
	CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
	CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
	CHANNEL_TLV_VPORT_UPDATE_MCAST,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
	CHANNEL_TLV_VPORT_UPDATE_RSS,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
	CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
	CHANNEL_TLV_UPDATE_TUNN_PARAM,
	CHANNEL_TLV_COALESCE_UPDATE,
	CHANNEL_TLV_QID,
	CHANNEL_TLV_MAX,

	/* Required for iterating over vport-update tlvs.
	 * Will break in case non-sequential vport-update tlvs.
	 */
	CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,

/*!!!!! Make sure to update STRINGS structure accordingly !!!!!*/
};

/* Human-readable names for the CHANNEL_TLV_* values, defined elsewhere. */
extern const char *ecore_channel_tlvs_string[];

#endif /* __ECORE_VF_PF_IF_H__ */