/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef ENA_ETH_COM_H_
#define ENA_ETH_COM_H_

#include "ena_com.h"

/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
#define ENA_COMP_HEAD_THRESH 4
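
/* Worked example (illustrative numbers): with ENA_COMP_HEAD_THRESH == 4 and
 * a completion queue depth of 1024, ena_com_update_dev_comp_head() below
 * writes the CQ head doorbell roughly once per 1024 / 4 = 256 completions,
 * rather than on every completion.
 */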

struct ena_com_tx_ctx {
	struct ena_com_tx_meta ena_meta;
	struct ena_com_buf *ena_bufs;
	/* For LLQ, header buffer - pushed to the device mem space */
	void *push_header;

	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	u16 num_bufs;
	u16 req_id;
	/* For a regular queue, indicates the size of the header.
	 * For LLQ, indicates the size of the pushed buffer.
	 */
	u16 header_len;

	u8 meta_valid;
	u8 tso_enable;
	u8 l3_csum_enable;
	u8 l4_csum_enable;
	u8 l4_csum_partial;
	u8 df; /* Don't fragment */
};
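
/* A minimal, hypothetical sketch of filling the TX context before calling
 * ena_com_prepare_tx(); bufs, nr_frags, hdr_len and id are caller-side
 * variables assumed for illustration:
 *
 *	struct ena_com_tx_ctx tx_ctx = {};
 *
 *	tx_ctx.ena_bufs = bufs;		// caller-owned buffer array
 *	tx_ctx.num_bufs = nr_frags;
 *	tx_ctx.req_id = id;		// echoed back by the TX completion
 *	tx_ctx.header_len = hdr_len;	// pushed-buffer size when using LLQ
 *	tx_ctx.l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
 *	tx_ctx.l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
 *	tx_ctx.l4_csum_enable = 1;
 */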

struct ena_com_rx_ctx {
	struct ena_com_rx_buf_info *ena_bufs;
	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	bool l3_csum_err;
	bool l4_csum_err;
	u8 l4_csum_checked;
	/* fragmented packet */
	bool frag;
	u32 hash;
	u16 descs;
	int max_bufs;
};

int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc);
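
/* Sketch of a typical TX submission flow (error handling trimmed, and a real
 * driver usually batches the doorbell write across several packets):
 *
 *	if (!ena_com_sq_have_enough_space(io_sq, num_descs_needed))
 *		return -ENOSPC;			// back-pressure the caller
 *
 *	rc = ena_com_prepare_tx(io_sq, &tx_ctx, &nb_hw_desc);
 *	if (unlikely(rc))
 *		return rc;
 *
 *	ena_com_write_sq_doorbell(io_sq);	// make the new tail visible
 */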

int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx);

int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id);

bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
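
/* Sketch of an RX poll loop built from the declarations above; rx_ctx setup
 * and buffer bookkeeping are hypothetical driver-side details:
 *
 *	while (!ena_com_cq_empty(io_cq)) {
 *		rc = ena_com_rx_pkt(io_cq, io_sq, &rx_ctx);
 *		if (rc || rx_ctx.descs == 0)
 *			break;			// nothing (more) to reap
 *		// hand rx_ctx.ena_bufs[0 .. rx_ctx.descs - 1] to the stack,
 *		// then repost one buffer per consumed descriptor:
 *		ena_com_add_single_rx_desc(io_sq, &ena_buf, req_id);
 *	}
 */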

static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
				       struct ena_eth_io_intr_reg *intr_reg)
{
	writel(intr_reg->intr_control, io_cq->unmask_reg);
}

static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq)
{
	u16 tail, next_to_comp, cnt;

	next_to_comp = io_sq->next_to_comp;
	tail = io_sq->tail;
	/* tail and next_to_comp are free-running u16 counters, so the
	 * subtraction is wraparound-safe; one descriptor is kept in reserve.
	 */
	cnt = tail - next_to_comp;

	return io_sq->q_depth - 1 - cnt;
}

/* Check if the submission queue has enough space to hold required_buffers */
static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
						u16 required_buffers)
{
	int temp;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return ena_com_free_desc(io_sq) >= required_buffers;

	/* This calculation doesn't need to be 100% accurate. To reduce
	 * the calculation overhead, just subtract 2 lines from the free
	 * descs (one for the header line and one to compensate for the
	 * round-down division).
	 */
	temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;

	return ena_com_free_desc(io_sq) > temp;
}
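
/* Worked example of the estimate above (illustrative values): with
 * descs_per_entry == 4 and required_buffers == 10, temp = 10 / 4 + 2 = 4,
 * so the check demands more than 4 free descriptors. The extra 2 keep the
 * cheap round-down division conservative instead of exact.
 */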

static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	if (!ena_tx_ctx->meta_valid)
		return false;

	return !!memcmp(&io_sq->cached_tx_meta,
			&ena_tx_ctx->ena_meta,
			sizeof(struct ena_com_tx_meta));
}

static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)
{
	return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) &&
	       io_sq->llq_info.max_entries_in_tx_burst > 0;
}

static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
					      struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_com_llq_info *llq_info;
	int descs_after_first_entry;
	int num_entries_needed = 1;
	u16 num_descs;

	if (!is_llq_max_tx_burst_exists(io_sq))
		return false;

	llq_info = &io_sq->llq_info;
	num_descs = ena_tx_ctx->num_bufs;

	if (unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
		++num_descs;

	if (num_descs > llq_info->descs_num_before_header) {
		descs_after_first_entry = num_descs - llq_info->descs_num_before_header;
		num_entries_needed += DIV_ROUND_UP(descs_after_first_entry,
						   llq_info->descs_per_entry);
	}

	pr_debug("queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid,
		 num_descs, num_entries_needed);

	return num_entries_needed > io_sq->entries_in_tx_burst_left;
}

static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
	u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
	u16 tail = io_sq->tail;

	pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
		 io_sq->qid, tail);

	writel(tail, io_sq->db_addr);

	if (is_llq_max_tx_burst_exists(io_sq)) {
		pr_debug("reset available entries in tx burst for queue %d to %d\n",
			 io_sq->qid, max_entries_in_tx_burst);
		io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
	}

	return 0;
}

static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
{
	u16 unreported_comp, head;
	bool need_update;

	if (unlikely(io_cq->cq_head_db_reg)) {
		head = io_cq->head;
		unreported_comp = head - io_cq->last_head_update;
		need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);

		if (unlikely(need_update)) {
			pr_debug("Write completion queue doorbell for queue %d: head: %d\n",
				 io_cq->qid, head);
			writel(head, io_cq->cq_head_db_reg);
			io_cq->last_head_update = head;
		}
	}

	return 0;
}

static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
					    u8 numa_node)
{
	struct ena_eth_io_numa_node_cfg_reg numa_cfg;

	if (!io_cq->numa_node_cfg_reg)
		return;

	numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
		| ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;

	writel(numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}

static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
{
	io_sq->next_to_comp += elem;
}

static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
	io_cq->head++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
		io_cq->phase ^= 1;
}

static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
					     u16 *req_id)
{
	u8 expected_phase, cdesc_phase;
	struct ena_eth_io_tx_cdesc *cdesc;
	u16 masked_head;

	masked_head = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_tx_cdesc *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		(masked_head * io_cq->cdesc_entry_size_in_bytes));

	/* When the current completion descriptor phase isn't the same as the
	 * expected phase, it means that the device hasn't updated this
	 * completion yet.
	 */
	cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
	if (cdesc_phase != expected_phase)
		return -EAGAIN;

	/* Make sure the rest of the descriptor is read only after the phase
	 * bit has been validated.
	 */
	dma_rmb();

	*req_id = READ_ONCE(cdesc->req_id);
	if (unlikely(*req_id >= io_cq->q_depth)) {
		pr_err("Invalid req id %d\n", cdesc->req_id);
		return -EINVAL;
	}

	ena_com_cq_inc_head(io_cq);

	return 0;
}
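
/* Sketch of a TX completion reap loop using the helpers above; the tx_info
 * table mapping req_id to per-packet state is hypothetical driver state:
 *
 *	while (ena_com_tx_comp_req_id_get(io_cq, &req_id) == 0) {
 *		// release skb/DMA resources tracked under req_id, then
 *		// return its descriptors to the SQ:
 *		ena_com_comp_ack(io_sq, tx_info[req_id].tx_descs);
 *	}
 *	ena_com_update_dev_comp_head(io_cq);	// head doorbell if needed
 */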

#endif /* ENA_ETH_COM_H_ */