/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of copyright holder nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef ENA_ETH_COM_H_
#define ENA_ETH_COM_H_

#if defined(__cplusplus)
extern "C" {
#endif
#include "ena_com.h"

struct ena_com_tx_ctx {
	struct ena_com_tx_meta ena_meta;
	struct ena_com_buf *ena_bufs;
	/* For LLQ, header buffer - pushed to the device mem space */
	void *push_header;

	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	u16 num_bufs;
	u16 req_id;
	/* For regular queue, indicate the size of the header
	 * For LLQ, indicate the size of the pushed buffer
	 */
	u16 header_len;

	u8 meta_valid;
	u8 tso_enable;
	u8 l3_csum_enable;
	u8 l4_csum_enable;
	u8 l4_csum_partial;
	u8 df; /* Don't fragment */
};
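
/*
 * Illustrative sketch (not part of this header's API): populating a TX
 * context for an IPv4/TCP packet with TSO and checksum offload before
 * handing it to ena_com_prepare_tx(). The tx_info, nsegs, id and hdr_len
 * names are hypothetical caller-side values.
 *
 *	struct ena_com_tx_ctx ctx;
 *
 *	memset(&ctx, 0, sizeof(ctx));
 *	ctx.ena_bufs = tx_info->bufs;
 *	ctx.num_bufs = nsegs;
 *	ctx.req_id = id;
 *	ctx.header_len = hdr_len;
 *	ctx.l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
 *	ctx.l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
 *	ctx.tso_enable = 1;
 *	ctx.l3_csum_enable = 1;
 *	ctx.l4_csum_enable = 1;
 *	ctx.meta_valid = 1;
 */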

struct ena_com_rx_ctx {
	struct ena_com_rx_buf_info *ena_bufs;
	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	bool l3_csum_err;
	bool l4_csum_err;
	u8 l4_csum_checked;
	/* fragmented packet */
	bool frag;
	u32 hash;
	u16 descs;
	u16 max_bufs;
	u8 pkt_offset;
};

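/* ena_com_prepare_tx - Prepare TX descriptors from the given context and
 * post them to the submission queue.
 * @io_sq: IO submission queue
 * @ena_tx_ctx: TX context describing the packet (buffers, offloads, metadata)
 * @nb_hw_desc: output, number of hardware descriptors consumed
 *
 * @return - 0 on success, negative value on failure.
 * (Summary comment; see ena_eth_com.c for the authoritative behavior.)
 */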
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc);

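/* ena_com_rx_pkt - Retrieve the next received packet from the completion
 * queue.
 * @io_cq: IO completion queue
 * @io_sq: the IO submission queue the packet was posted on
 * @ena_rx_ctx: output, RX context describing the received packet
 *
 * On return, ena_rx_ctx->descs is expected to hold the number of
 * descriptors the packet spans (0 when no packet is pending).
 *
 * @return - 0 on success, negative value on failure.
 */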
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx);

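/* ena_com_add_single_rx_desc - Post a single RX buffer descriptor to the
 * submission queue.
 * @io_sq: RX IO submission queue
 * @ena_buf: buffer address/length pair to post
 * @req_id: request id echoed back in the matching RX completion
 *
 * @return - 0 on success, negative value on failure.
 */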
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id);

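/* ena_com_cq_empty - Check whether the completion queue has no pending
 * completion descriptors.
 */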
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);

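/* ena_com_unmask_intr - Unmask the interrupt vector of the completion
 * queue by writing the interrupt control value to the queue's unmask
 * register.
 */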
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
				       struct ena_eth_io_intr_reg *intr_reg)
{
	ENA_REG_WRITE32(io_cq->bus, intr_reg->intr_control, io_cq->unmask_reg);
}

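/* ena_com_free_q_entries - Number of free entries in the submission queue.
 *
 * tail and next_to_comp are free-running u16 counters, so the unsigned
 * subtraction yields the number of in-flight descriptors even across
 * wrap around. One entry is kept reserved so a full queue can be
 * distinguished from an empty one.
 */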
static inline int ena_com_free_q_entries(struct ena_com_io_sq *io_sq)
{
	u16 tail, next_to_comp, cnt;

	next_to_comp = io_sq->next_to_comp;
	tail = io_sq->tail;
	cnt = tail - next_to_comp;

	return io_sq->q_depth - 1 - cnt;
}

/* Check if the submission queue has enough space to hold required_buffers */
static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
						u16 required_buffers)
{
	int temp;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return ena_com_free_q_entries(io_sq) >= required_buffers;

	/* This calculation doesn't need to be 100% accurate. To reduce the
	 * calculation overhead, just subtract 2 lines from the free descs:
	 * one for the header line and one to compensate for the division
	 * rounding down.
	 */
	temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;

	return ena_com_free_q_entries(io_sq) > temp;
}

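/* ena_com_meta_desc_changed - Check whether the TX metadata in the context
 * differs from the metadata cached on the submission queue, in which case
 * a new metadata descriptor needs to be sent to the device.
 */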
static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	if (!ena_tx_ctx->meta_valid)
		return false;

	return !!memcmp(&io_sq->cached_tx_meta,
			&ena_tx_ctx->ena_meta,
			sizeof(struct ena_com_tx_meta));
}

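/* is_llq_max_tx_burst_exists - Check whether the queue is an LLQ (device
 * placement) queue with a TX burst limit configured.
 */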
static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)
{
	return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) &&
	       io_sq->llq_info.max_entries_in_tx_burst > 0;
}

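/* ena_com_is_doorbell_needed - Check whether a doorbell must be written
 * before queueing this TX context.
 *
 * Relevant only for LLQ queues with a TX burst limit: computes how many
 * LLQ entries the packet's descriptors (plus a possible metadata
 * descriptor) will occupy and compares that with the entries left in the
 * current burst.
 */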
static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
					      struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_com_llq_info *llq_info;
	int descs_after_first_entry;
	int num_entries_needed = 1;
	u16 num_descs;

	if (!is_llq_max_tx_burst_exists(io_sq))
		return false;

	llq_info = &io_sq->llq_info;
	num_descs = ena_tx_ctx->num_bufs;

	if (llq_info->disable_meta_caching ||
	    unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
		++num_descs;

	if (num_descs > llq_info->descs_num_before_header) {
		descs_after_first_entry = num_descs - llq_info->descs_num_before_header;
		num_entries_needed += DIV_ROUND_UP(descs_after_first_entry,
						   llq_info->descs_per_entry);
	}

	ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
		    "Queue: %d num_descs: %d num_entries_needed: %d\n",
		    io_sq->qid, num_descs, num_entries_needed);

	return num_entries_needed > io_sq->entries_in_tx_burst_left;
}

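/* ena_com_write_sq_doorbell - Write the submission queue tail to the
 * doorbell register, notifying the device of the newly posted descriptors.
 * For LLQ queues with a burst limit this also resets the burst budget.
 */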
static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
	u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
	u16 tail = io_sq->tail;

	ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
		    "Write submission queue doorbell for queue: %d tail: %d\n",
		    io_sq->qid, tail);

	ENA_REG_WRITE32(io_sq->bus, tail, io_sq->db_addr);

	if (is_llq_max_tx_burst_exists(io_sq)) {
		ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
			    "Reset available entries in tx burst for queue %d to %d\n",
			    io_sq->qid, max_entries_in_tx_burst);
		io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
	}

	return 0;
}

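/* ena_com_update_numa_node - Advertise the NUMA node of the queue's CPU to
 * the device, if the device exposes a NUMA configuration register for this
 * completion queue.
 */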
static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
					    u8 numa_node)
{
	struct ena_eth_io_numa_node_cfg_reg numa_cfg;

	if (!io_cq->numa_node_cfg_reg)
		return;

	numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
		| ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;

	ENA_REG_WRITE32(io_cq->bus, numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}

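/* ena_com_comp_ack - Acknowledge the completion of 'elem' submission queue
 * entries by advancing the next-to-complete counter.
 */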
static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
{
	io_sq->next_to_comp += elem;
}

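/* ena_com_cq_inc_head - Advance the completion queue head, flipping the
 * expected phase bit whenever the head wraps around the queue.
 */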
static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
	io_cq->head++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
		io_cq->phase ^= 1;
}

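/* ena_com_tx_comp_req_id_get - Fetch the req_id of the next TX completion.
 *
 * @return - 0 with the request id stored in *req_id when a completion is
 * available, ENA_COM_TRY_AGAIN when the device hasn't written the next
 * completion descriptor yet, ENA_COM_INVAL on an out-of-range req_id.
 */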
static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
					     u16 *req_id)
{
	u8 expected_phase, cdesc_phase;
	struct ena_eth_io_tx_cdesc *cdesc;
	u16 masked_head;

	masked_head = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_tx_cdesc *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		 (masked_head * io_cq->cdesc_entry_size_in_bytes));

	/* When the current completion descriptor phase isn't the same as the
	 * expected, it means that the device still didn't update
	 * this completion.
	 */
	cdesc_phase = READ_ONCE16(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
	if (cdesc_phase != expected_phase)
		return ENA_COM_TRY_AGAIN;

	dma_rmb();

	*req_id = READ_ONCE16(cdesc->req_id);
	if (unlikely(*req_id >= io_cq->q_depth)) {
		ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq),
			    "Invalid req id %d\n", cdesc->req_id);
		return ENA_COM_INVAL;
	}

	ena_com_cq_inc_head(io_cq);

	return 0;
}


#if defined(__cplusplus)
}
#endif
#endif /* ENA_ETH_COM_H_ */