145c98dacSZbigniew Bodek /*-
29eb1615fSMarcin Wojtas * SPDX-License-Identifier: BSD-3-Clause
345c98dacSZbigniew Bodek *
4*adfed2d8SArthur Kiyanovski * Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates.
545c98dacSZbigniew Bodek * All rights reserved.
645c98dacSZbigniew Bodek *
745c98dacSZbigniew Bodek * Redistribution and use in source and binary forms, with or without
845c98dacSZbigniew Bodek * modification, are permitted provided that the following conditions
945c98dacSZbigniew Bodek * are met:
1045c98dacSZbigniew Bodek *
1145c98dacSZbigniew Bodek * * Redistributions of source code must retain the above copyright
1245c98dacSZbigniew Bodek * notice, this list of conditions and the following disclaimer.
1345c98dacSZbigniew Bodek * * Redistributions in binary form must reproduce the above copyright
1445c98dacSZbigniew Bodek * notice, this list of conditions and the following disclaimer in
1545c98dacSZbigniew Bodek * the documentation and/or other materials provided with the
1645c98dacSZbigniew Bodek * distribution.
1745c98dacSZbigniew Bodek * * Neither the name of copyright holder nor the names of its
1845c98dacSZbigniew Bodek * contributors may be used to endorse or promote products derived
1945c98dacSZbigniew Bodek * from this software without specific prior written permission.
2045c98dacSZbigniew Bodek *
2145c98dacSZbigniew Bodek * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
2245c98dacSZbigniew Bodek * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
2345c98dacSZbigniew Bodek * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
2445c98dacSZbigniew Bodek * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
2545c98dacSZbigniew Bodek * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
2645c98dacSZbigniew Bodek * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
2745c98dacSZbigniew Bodek * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
2845c98dacSZbigniew Bodek * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
2945c98dacSZbigniew Bodek * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
3045c98dacSZbigniew Bodek * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
3145c98dacSZbigniew Bodek * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3245c98dacSZbigniew Bodek */
3345c98dacSZbigniew Bodek
3445c98dacSZbigniew Bodek #ifndef ENA_ETH_COM_H_
3545c98dacSZbigniew Bodek #define ENA_ETH_COM_H_
3645c98dacSZbigniew Bodek
3745c98dacSZbigniew Bodek #if defined(__cplusplus)
3845c98dacSZbigniew Bodek extern "C" {
3945c98dacSZbigniew Bodek #endif
4045c98dacSZbigniew Bodek #include "ena_com.h"
4145c98dacSZbigniew Bodek
/* Context describing a single Tx packet handed to ena_com_prepare_tx(). */
struct ena_com_tx_ctx {
	struct ena_com_tx_meta ena_meta;	/* Offload metadata for this packet */
	struct ena_com_buf *ena_bufs;		/* Array of data buffer descriptors */
	/* For LLQ, header buffer - pushed to the device mem space */
	void *push_header;

	enum ena_eth_io_l3_proto_index l3_proto;	/* L3 protocol type */
	enum ena_eth_io_l4_proto_index l4_proto;	/* L4 protocol type */
	u16 num_bufs;		/* Number of entries in ena_bufs */
	u16 req_id;		/* Id echoed back in the Tx completion */
	/* For regular queue, indicate the size of the header
	 * For LLQ, indicate the size of the pushed buffer
	 */
	u16 header_len;

	u8 meta_valid;		/* ena_meta holds valid data */
	u8 tso_enable;		/* Request TCP segmentation offload */
	u8 l3_csum_enable;	/* Request L3 checksum offload */
	u8 l4_csum_enable;	/* Request L4 checksum offload */
	u8 l4_csum_partial;	/* L4 checksum is partial */
	u8 df; /* Don't fragment */
};
6445c98dacSZbigniew Bodek
/* Context filled in by ena_com_rx_pkt() for each received packet. */
struct ena_com_rx_ctx {
	struct ena_com_rx_buf_info *ena_bufs;	/* Per-descriptor buffer info */
	enum ena_eth_io_l3_proto_index l3_proto;	/* L3 protocol reported by device */
	enum ena_eth_io_l4_proto_index l4_proto;	/* L4 protocol reported by device */
	bool l3_csum_err;	/* Device flagged an L3 checksum error */
	bool l4_csum_err;	/* Device flagged an L4 checksum error */
	u8 l4_csum_checked;	/* Device actually validated the L4 checksum */
	/* fragmented packet */
	bool frag;
	u32 hash;	/* RSS hash of the packet */
	u16 descs;	/* Number of descriptors the packet spans */
	u16 max_bufs;	/* Capacity of the ena_bufs array (set by caller) */
	u8 pkt_offset;	/* Offset of packet data within the first buffer */
};
7945c98dacSZbigniew Bodek
/* Build and queue the Tx descriptors for the packet described by ena_tx_ctx;
 * the number of hardware descriptors consumed is reported via nb_hw_desc.
 */
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc);

/* Retrieve one received packet from the completion queue, filling
 * ena_rx_ctx with its buffers and parsed attributes.
 */
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx);

/* Post a single Rx buffer, tagged with req_id, to the Rx submission queue. */
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id);

/* Check whether the completion queue has no pending completion entries. */
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
9345c98dacSZbigniew Bodek
/* Unmask (re-arm) the interrupt of the given IO completion queue by writing
 * the prepared interrupt-control value to the queue's unmask register.
 */
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
				       struct ena_eth_io_intr_reg *intr_reg)
{
	ENA_REG_WRITE32(io_cq->bus, intr_reg->intr_control, io_cq->unmask_reg);
}
9945c98dacSZbigniew Bodek
ena_com_free_q_entries(struct ena_com_io_sq * io_sq)1008483b844SMarcin Wojtas static inline int ena_com_free_q_entries(struct ena_com_io_sq *io_sq)
10145c98dacSZbigniew Bodek {
10245c98dacSZbigniew Bodek u16 tail, next_to_comp, cnt;
10345c98dacSZbigniew Bodek
10445c98dacSZbigniew Bodek next_to_comp = io_sq->next_to_comp;
10545c98dacSZbigniew Bodek tail = io_sq->tail;
10645c98dacSZbigniew Bodek cnt = tail - next_to_comp;
10745c98dacSZbigniew Bodek
10845c98dacSZbigniew Bodek return io_sq->q_depth - 1 - cnt;
10945c98dacSZbigniew Bodek }
11045c98dacSZbigniew Bodek
111a195fab0SMarcin Wojtas /* Check if the submission queue has enough space to hold required_buffers */
ena_com_sq_have_enough_space(struct ena_com_io_sq * io_sq,u16 required_buffers)112a195fab0SMarcin Wojtas static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
113a195fab0SMarcin Wojtas u16 required_buffers)
114a195fab0SMarcin Wojtas {
115a195fab0SMarcin Wojtas int temp;
116a195fab0SMarcin Wojtas
117a195fab0SMarcin Wojtas if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
1188483b844SMarcin Wojtas return ena_com_free_q_entries(io_sq) >= required_buffers;
119a195fab0SMarcin Wojtas
120a195fab0SMarcin Wojtas /* This calculation doesn't need to be 100% accurate. So to reduce
121a195fab0SMarcin Wojtas * the calculation overhead just Subtract 2 lines from the free descs
122a195fab0SMarcin Wojtas * (one for the header line and one to compensate the devision
123a195fab0SMarcin Wojtas * down calculation.
124a195fab0SMarcin Wojtas */
125a195fab0SMarcin Wojtas temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;
126a195fab0SMarcin Wojtas
1278483b844SMarcin Wojtas return ena_com_free_q_entries(io_sq) > temp;
128a195fab0SMarcin Wojtas }
129a195fab0SMarcin Wojtas
ena_com_meta_desc_changed(struct ena_com_io_sq * io_sq,struct ena_com_tx_ctx * ena_tx_ctx)13067ec48bbSMarcin Wojtas static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
13167ec48bbSMarcin Wojtas struct ena_com_tx_ctx *ena_tx_ctx)
13267ec48bbSMarcin Wojtas {
13367ec48bbSMarcin Wojtas if (!ena_tx_ctx->meta_valid)
13467ec48bbSMarcin Wojtas return false;
13567ec48bbSMarcin Wojtas
13667ec48bbSMarcin Wojtas return !!memcmp(&io_sq->cached_tx_meta,
13767ec48bbSMarcin Wojtas &ena_tx_ctx->ena_meta,
13867ec48bbSMarcin Wojtas sizeof(struct ena_com_tx_meta));
13967ec48bbSMarcin Wojtas }
14067ec48bbSMarcin Wojtas
is_llq_max_tx_burst_exists(struct ena_com_io_sq * io_sq)14167ec48bbSMarcin Wojtas static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)
14267ec48bbSMarcin Wojtas {
14367ec48bbSMarcin Wojtas return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) &&
14467ec48bbSMarcin Wojtas io_sq->llq_info.max_entries_in_tx_burst > 0;
14567ec48bbSMarcin Wojtas }
14667ec48bbSMarcin Wojtas
ena_com_is_doorbell_needed(struct ena_com_io_sq * io_sq,struct ena_com_tx_ctx * ena_tx_ctx)14767ec48bbSMarcin Wojtas static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
14867ec48bbSMarcin Wojtas struct ena_com_tx_ctx *ena_tx_ctx)
14967ec48bbSMarcin Wojtas {
15067ec48bbSMarcin Wojtas struct ena_com_llq_info *llq_info;
15167ec48bbSMarcin Wojtas int descs_after_first_entry;
15267ec48bbSMarcin Wojtas int num_entries_needed = 1;
15367ec48bbSMarcin Wojtas u16 num_descs;
15467ec48bbSMarcin Wojtas
15567ec48bbSMarcin Wojtas if (!is_llq_max_tx_burst_exists(io_sq))
15667ec48bbSMarcin Wojtas return false;
15767ec48bbSMarcin Wojtas
15867ec48bbSMarcin Wojtas llq_info = &io_sq->llq_info;
15967ec48bbSMarcin Wojtas num_descs = ena_tx_ctx->num_bufs;
16067ec48bbSMarcin Wojtas
1618483b844SMarcin Wojtas if (llq_info->disable_meta_caching ||
1628483b844SMarcin Wojtas unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
16367ec48bbSMarcin Wojtas ++num_descs;
16467ec48bbSMarcin Wojtas
16567ec48bbSMarcin Wojtas if (num_descs > llq_info->descs_num_before_header) {
16667ec48bbSMarcin Wojtas descs_after_first_entry = num_descs - llq_info->descs_num_before_header;
16767ec48bbSMarcin Wojtas num_entries_needed += DIV_ROUND_UP(descs_after_first_entry,
16867ec48bbSMarcin Wojtas llq_info->descs_per_entry);
16967ec48bbSMarcin Wojtas }
17067ec48bbSMarcin Wojtas
1719eb1615fSMarcin Wojtas ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
1729eb1615fSMarcin Wojtas "Queue: %d num_descs: %d num_entries_needed: %d\n",
17367ec48bbSMarcin Wojtas io_sq->qid, num_descs, num_entries_needed);
17467ec48bbSMarcin Wojtas
17567ec48bbSMarcin Wojtas return num_entries_needed > io_sq->entries_in_tx_burst_left;
17667ec48bbSMarcin Wojtas }
17767ec48bbSMarcin Wojtas
/* Notify the device of the current SQ tail by writing it to the queue's
 * doorbell register.  For LLQ queues with a burst limit, the doorbell also
 * starts a fresh Tx burst, so the remaining-entries budget is reset.
 * Always returns 0.
 */
static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
	u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
	u16 tail = io_sq->tail;

	ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
		    "Write submission queue doorbell for queue: %d tail: %d\n",
		    io_sq->qid, tail);

	ENA_REG_WRITE32(io_sq->bus, tail, io_sq->db_addr);

	if (is_llq_max_tx_burst_exists(io_sq)) {
		ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
			    "Reset available entries in tx burst for queue %d to %d\n",
			    io_sq->qid, max_entries_in_tx_burst);
		io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
	}

	return 0;
}
19845c98dacSZbigniew Bodek
/* Program the CQ's NUMA node configuration register with the given node id
 * (masked to the register's NUMA field) plus the enable bit.  No-op when
 * the queue has no NUMA config register mapped.
 */
static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
					    u8 numa_node)
{
	struct ena_eth_io_numa_node_cfg_reg numa_cfg;

	if (!io_cq->numa_node_cfg_reg)
		return;

	numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
		| ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;

	ENA_REG_WRITE32(io_cq->bus, numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}
21245c98dacSZbigniew Bodek
/* Acknowledge `elem` completed descriptors by advancing the SQ's
 * next-to-complete counter, releasing their slots for reuse.
 */
static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
{
	io_sq->next_to_comp = io_sq->next_to_comp + elem;
}
21745c98dacSZbigniew Bodek
ena_com_cq_inc_head(struct ena_com_io_cq * io_cq)21867ec48bbSMarcin Wojtas static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
21967ec48bbSMarcin Wojtas {
22067ec48bbSMarcin Wojtas io_cq->head++;
22167ec48bbSMarcin Wojtas
22267ec48bbSMarcin Wojtas /* Switch phase bit in case of wrap around */
22367ec48bbSMarcin Wojtas if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
22467ec48bbSMarcin Wojtas io_cq->phase ^= 1;
22567ec48bbSMarcin Wojtas }
22667ec48bbSMarcin Wojtas
/* Fetch the req_id of the next Tx completion from the IO CQ.
 *
 * Returns 0 and advances the CQ head on success, ENA_COM_TRY_AGAIN when the
 * descriptor at the current head hasn't been written by the device yet
 * (phase bit mismatch), or ENA_COM_INVAL when the descriptor carries an
 * out-of-range req_id.
 */
static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
					     u16 *req_id)
{
	u8 expected_phase, cdesc_phase;
	struct ena_eth_io_tx_cdesc *cdesc;
	u16 masked_head;

	masked_head = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	/* Locate the completion descriptor at the (masked) head index */
	cdesc = (struct ena_eth_io_tx_cdesc *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		(masked_head * io_cq->cdesc_entry_size_in_bytes));

	/* When the current completion descriptor phase isn't the same as the
	 * expected, it mean that the device still didn't update
	 * this completion.
	 */
	cdesc_phase = READ_ONCE16(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
	if (cdesc_phase != expected_phase)
		return ENA_COM_TRY_AGAIN;

	/* Ensure the rest of the descriptor is read only after the phase bit
	 * has been validated (the device writes the payload before flipping
	 * the phase).
	 */
	dma_rmb();

	*req_id = READ_ONCE16(cdesc->req_id);
	if (unlikely(*req_id >= io_cq->q_depth)) {
		ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq),
			    "Invalid req id %d\n", cdesc->req_id);
		return ENA_COM_INVAL;
	}

	ena_com_cq_inc_head(io_cq);

	return 0;
}
26267ec48bbSMarcin Wojtas
26345c98dacSZbigniew Bodek #if defined(__cplusplus)
26445c98dacSZbigniew Bodek }
26545c98dacSZbigniew Bodek #endif
26645c98dacSZbigniew Bodek #endif /* ENA_ETH_COM_H_ */
267