/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/geneve.h>
#include <net/dsfield.h>
#include "en.h"
#include "en/txrx.h"
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/macsec.h"
#include "en/ptp.h"
#include <net/ipv6.h>

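/* Unwind the last num_dma DMA mappings pushed to the SQ's DMA fifo; used
 * when building a WQE fails partway through.
 */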
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
	int i;

	for (i = 0; i < num_dma; i++) {
		struct mlx5e_sq_dma *last_pushed_dma =
			mlx5e_dma_get(sq, --sq->dma_fifo_pc);

		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
	}
}

static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}

static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
	if (skb_transport_header_was_set(skb))
		return skb_transport_offset(skb);
	else
		return mlx5e_skb_l2_header_offset(skb);
}

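/* Number of header bytes to copy inline into the WQE for the given inline
 * mode, capped at skb_headlen().
 */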
static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
					struct sk_buff *skb)
{
	u16 hlen;

	switch (mode) {
	case MLX5_INLINE_MODE_NONE:
		return 0;
	case MLX5_INLINE_MODE_TCP_UDP:
		hlen = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
			hlen += VLAN_HLEN;
		break;
	case MLX5_INLINE_MODE_IP:
		hlen = mlx5e_skb_l3_header_offset(skb);
		break;
	case MLX5_INLINE_MODE_L2:
	default:
		hlen = mlx5e_skb_l2_header_offset(skb);
	}
	return min_t(u16, hlen, skb_headlen(skb));
}

#define MLX5_UNSAFE_MEMCPY_DISCLAIMER				\
	"This copy has been bounds-checked earlier in "		\
	"mlx5i_sq_calc_wqe_attr() and intentionally "		\
	"crosses a flex array boundary. Since it is "		\
	"performance sensitive, splitting the copy is "		\
	"undesirable."

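/* Copy the inline headers into the WQE while inserting an 802.1Q tag (taken
 * from the skb metadata) right after the MAC addresses.
 */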
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
{
	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
	int cpy1_sz = 2 * ETH_ALEN;
	int cpy2_sz = ihs - cpy1_sz;

	memcpy(&vhdr->addrs, skb->data, cpy1_sz);
	vhdr->h_vlan_proto = skb->vlan_proto;
	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
	unsafe_memcpy(&vhdr->h_vlan_encapsulated_proto,
		      skb->data + cpy1_sz,
		      cpy2_sz,
		      MLX5_UNSAFE_MEMCPY_DISCLAIMER);
}

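/* Set the checksum offload flags in the Ethernet segment according to the
 * skb checksum state (and to the TLS offload state when CONFIG_MLX5_EN_TLS
 * is enabled).
 */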
static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			    struct mlx5e_accel_tx_state *accel,
			    struct mlx5_wqe_eth_seg *eseg)
{
	if (unlikely(mlx5e_ipsec_txwqe_build_eseg_csum(sq, skb, eseg)))
		return;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
		if (skb->encapsulation) {
			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
					  MLX5_ETH_WQE_L4_INNER_CSUM;
			sq->stats->csum_partial_inner++;
		} else {
			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
			sq->stats->csum_partial++;
		}
#ifdef CONFIG_MLX5_EN_TLS
	} else if (unlikely(accel && accel->tls.tls_tisn)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
		sq->stats->csum_partial++;
#endif
	} else
		sq->stats->csum_none++;
}

/* Returns the number of header bytes that we plan
 * to inline later in the transmit descriptor
 */
static inline u16
mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
{
	struct mlx5e_sq_stats *stats = sq->stats;
	u16 ihs;

	*hopbyhop = 0;
	if (skb->encapsulation) {
		ihs = skb_inner_tcp_all_headers(skb);
		stats->tso_inner_packets++;
		stats->tso_inner_bytes += skb->len - ihs;
	} else {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
			ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
		} else {
			ihs = skb_tcp_all_headers(skb);
			if (ipv6_has_hopopt_jumbo(skb)) {
				*hopbyhop = sizeof(struct hop_jumbo_hdr);
				ihs -= sizeof(struct hop_jumbo_hdr);
			}
		}
		stats->tso_packets++;
		stats->tso_bytes += skb->len - ihs - *hopbyhop;
	}

	return ihs;
}

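/* DMA-map the linear headlen and all page fragments and fill one data
 * segment per mapping. Returns the number of mappings, or -ENOMEM after
 * unwinding the mappings done so far.
 */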
static inline int
mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			unsigned char *skb_data, u16 headlen,
			struct mlx5_wqe_data_seg *dseg)
{
	dma_addr_t dma_addr = 0;
	u8 num_dma          = 0;
	int i;

	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
		num_dma++;
		dseg++;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
		num_dma++;
		dseg++;
	}

	return num_dma;

dma_unmap_wqe_err:
	mlx5e_dma_unmap_wqe_err(sq, num_dma);
	return -ENOMEM;
}

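/* Per-packet TX parameters, filled once by mlx5e_sq_xmit_prepare(). */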
struct mlx5e_tx_attr {
	u32 num_bytes;
	u16 headlen;
	u16 ihs;
	__be16 mss;
	u16 insz;
	u8 opcode;
	u8 hopbyhop;
};

struct mlx5e_tx_wqe_attr {
	u16 ds_cnt;
	u16 ds_cnt_inl;
	u16 ds_cnt_ids;
	u8 num_wqebbs;
};

static u8
mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			 struct mlx5e_accel_tx_state *accel)
{
	u8 mode;

#ifdef CONFIG_MLX5_EN_TLS
	if (accel && accel->tls.tls_tisn)
		return MLX5_INLINE_MODE_TCP_UDP;
#endif

	mode = sq->min_inline_mode;

	if (skb_vlan_tag_present(skb) &&
	    test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
		mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);

	return mode;
}

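/* Derive the per-packet TX parameters: LSO vs. plain send, inline header
 * size, wire byte count and the linear headlen that will be DMA-mapped.
 */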
static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
				  struct mlx5e_accel_tx_state *accel,
				  struct mlx5e_tx_attr *attr)
{
	struct mlx5e_sq_stats *stats = sq->stats;

	if (skb_is_gso(skb)) {
		int hopbyhop;
		u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb, &hopbyhop);

		*attr = (struct mlx5e_tx_attr) {
			.opcode    = MLX5_OPCODE_LSO,
			.mss       = cpu_to_be16(skb_shinfo(skb)->gso_size),
			.ihs       = ihs,
			.num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs,
			.headlen   = skb_headlen(skb) - ihs - hopbyhop,
			.hopbyhop  = hopbyhop,
		};

		stats->packets += skb_shinfo(skb)->gso_segs;
	} else {
		u8 mode = mlx5e_tx_wqe_inline_mode(sq, skb, accel);
		u16 ihs = mlx5e_calc_min_inline(mode, skb);

		*attr = (struct mlx5e_tx_attr) {
			.opcode    = MLX5_OPCODE_SEND,
			.mss       = cpu_to_be16(0),
			.ihs       = ihs,
			.num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN),
			.headlen   = skb_headlen(skb) - ihs,
		};

		stats->packets++;
	}

	attr->insz = mlx5e_accel_tx_ids_len(sq, accel);
	stats->bytes += attr->num_bytes;
}

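/* Translate the TX parameters into data segment counts and the number of
 * WQE basic blocks (WQEBBs) the descriptor will occupy.
 */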
static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_attr *attr,
				   struct mlx5e_tx_wqe_attr *wqe_attr)
{
	u16 ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT;
	u16 ds_cnt_inl = 0;
	u16 ds_cnt_ids = 0;

	/* Sync the calculation with MLX5E_MAX_TX_WQEBBS. */

	if (attr->insz)
		ds_cnt_ids = DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + attr->insz,
					  MLX5_SEND_WQE_DS);

	ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags + ds_cnt_ids;
	if (attr->ihs) {
		u16 inl = attr->ihs - INL_HDR_START_SZ;

		if (skb_vlan_tag_present(skb))
			inl += VLAN_HLEN;

		ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
		if (WARN_ON_ONCE(ds_cnt_inl > MLX5E_MAX_TX_INLINE_DS))
			netdev_warn(skb->dev, "ds_cnt_inl = %u > max %u\n", ds_cnt_inl,
				    (u16)MLX5E_MAX_TX_INLINE_DS);
		ds_cnt += ds_cnt_inl;
	}

	*wqe_attr = (struct mlx5e_tx_wqe_attr) {
		.ds_cnt     = ds_cnt,
		.ds_cnt_inl = ds_cnt_inl,
		.ds_cnt_ids = ds_cnt_ids,
		.num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS),
	};
}

static void mlx5e_tx_skb_update_hwts_flags(struct sk_buff *skb)
{
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}

static void mlx5e_tx_check_stop(struct mlx5e_txqsq *sq)
{
	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats->stopped++;
	}
}

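/* Used on drop paths: post a NOP WQE and ring the doorbell so the SQ
 * producer state stays consistent.
 */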
static void mlx5e_tx_flush(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_wqe_info *wi;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	/* Must not be called when a MPWQE session is active but empty. */
	mlx5e_tx_mpwqe_ensure_complete(sq);

	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	wi = &sq->db.wqe_info[pi];

	*wi = (struct mlx5e_tx_wqe_info) {
		.num_wqebbs = 1,
	};

	wqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
}

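/* Finalize a WQE: record its metadata, fill the control segment, advance the
 * producer counter, handle PTP bookkeeping and ring the doorbell when
 * __netdev_tx_sent_queue() requests it.
 */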
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		     const struct mlx5e_tx_attr *attr,
		     const struct mlx5e_tx_wqe_attr *wqe_attr, u8 num_dma,
		     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
		     bool xmit_more)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	bool send_doorbell;

	*wi = (struct mlx5e_tx_wqe_info) {
		.skb = skb,
		.num_bytes = attr->num_bytes,
		.num_dma = num_dma,
		.num_wqebbs = wqe_attr->num_wqebbs,
		.num_fifo_pkts = 0,
	};

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | attr->opcode);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | wqe_attr->ds_cnt);

	mlx5e_tx_skb_update_hwts_flags(skb);

	sq->pc += wi->num_wqebbs;

	mlx5e_tx_check_stop(sq);

	if (unlikely(sq->ptpsq)) {
		mlx5e_skb_cb_hwtstamp_init(skb);
		mlx5e_skb_fifo_push(&sq->ptpsq->skb_fifo, skb);
		if (!netif_tx_queue_stopped(sq->txq) &&
		    !mlx5e_skb_fifo_has_room(&sq->ptpsq->skb_fifo)) {
			netif_tx_stop_queue(sq->txq);
			sq->stats->stopped++;
		}
		skb_get(skb);
	}

	send_doorbell = __netdev_tx_sent_queue(sq->txq, attr->num_bytes, xmit_more);
	if (send_doorbell)
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}

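/* Build and post a regular (non-MPWQE) send descriptor for one skb. */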
static void
mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		  const struct mlx5e_tx_attr *attr, const struct mlx5e_tx_wqe_attr *wqe_attr,
		  struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_eth_seg  *eseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe_info *wi;
	u16 ihs = attr->ihs;
	struct ipv6hdr *h6;
	struct mlx5e_sq_stats *stats = sq->stats;
	int num_dma;

	stats->xmit_more += xmit_more;

	/* fill wqe */
	wi   = &sq->db.wqe_info[pi];
	cseg = &wqe->ctrl;
	eseg = &wqe->eth;
	dseg =  wqe->data;

	eseg->mss = attr->mss;

	if (ihs) {
		u8 *start = eseg->inline_hdr.start;

		if (unlikely(attr->hopbyhop)) {
			/* remove the HBH header.
			 * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
			 */
			if (skb_vlan_tag_present(skb)) {
				mlx5e_insert_vlan(start, skb, ETH_HLEN + sizeof(*h6));
				ihs += VLAN_HLEN;
				h6 = (struct ipv6hdr *)(start + sizeof(struct vlan_ethhdr));
			} else {
				unsafe_memcpy(start, skb->data,
					      ETH_HLEN + sizeof(*h6),
					      MLX5_UNSAFE_MEMCPY_DISCLAIMER);
				h6 = (struct ipv6hdr *)(start + ETH_HLEN);
			}
			h6->nexthdr = IPPROTO_TCP;
			/* Copy the TCP header after the IPv6 one */
			memcpy(h6 + 1,
			       skb->data + ETH_HLEN + sizeof(*h6) +
					sizeof(struct hop_jumbo_hdr),
			       tcp_hdrlen(skb));
			/* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
		} else if (skb_vlan_tag_present(skb)) {
			mlx5e_insert_vlan(start, skb, ihs);
			ihs += VLAN_HLEN;
			stats->added_vlan_packets++;
		} else {
			unsafe_memcpy(eseg->inline_hdr.start, skb->data,
				      attr->ihs,
				      MLX5_UNSAFE_MEMCPY_DISCLAIMER);
		}
		eseg->inline_hdr.sz |= cpu_to_be16(ihs);
		dseg += wqe_attr->ds_cnt_inl;
	} else if (skb_vlan_tag_present(skb)) {
		eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
		if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
			eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
		eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
		stats->added_vlan_packets++;
	}

	dseg += wqe_attr->ds_cnt_ids;
	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs + attr->hopbyhop,
					  attr->headlen, dseg);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, attr, wqe_attr, num_dma, wi, cseg, xmit_more);

	return;

err_drop:
	stats->dropped++;
	dev_kfree_skb_any(skb);
	mlx5e_tx_flush(sq);
}

static bool mlx5e_tx_skb_supports_mpwqe(struct sk_buff *skb, struct mlx5e_tx_attr *attr)
{
	return !skb_is_nonlinear(skb) && !skb_vlan_tag_present(skb) && !attr->ihs &&
	       !attr->insz && !mlx5e_macsec_skb_is_offload(skb);
}

static bool mlx5e_tx_mpwqe_same_eseg(struct mlx5e_txqsq *sq, struct mlx5_wqe_eth_seg *eseg)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;

	/* Assumes the session is already running and has at least one packet. */
	return !memcmp(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN);
}

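/* Open a new enhanced multi-packet WQE (MPWQE) session and copy in the
 * Ethernet segment shared by all packets of the session.
 */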
static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
					 struct mlx5_wqe_eth_seg *eseg)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	pi = mlx5e_txqsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
	net_prefetchw(wqe->data);

	*session = (struct mlx5e_tx_mpwqe) {
		.wqe = wqe,
		.bytes_count = 0,
		.ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
		.pkt_count = 0,
		.inline_on = 0,
	};

	memcpy(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN);

	sq->stats->mpwqe_blks++;
}

static bool mlx5e_tx_mpwqe_session_is_active(struct mlx5e_txqsq *sq)
{
	return sq->mpwqe.wqe;
}

static void mlx5e_tx_mpwqe_add_dseg(struct mlx5e_txqsq *sq, struct mlx5e_xmit_data *txd)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5_wqe_data_seg *dseg;

	dseg = (struct mlx5_wqe_data_seg *)session->wqe + session->ds_count;

	session->pkt_count++;
	session->bytes_count += txd->len;

	dseg->addr = cpu_to_be64(txd->dma_addr);
	dseg->byte_count = cpu_to_be32(txd->len);
	dseg->lkey = sq->mkey_be;
	session->ds_count++;

	sq->stats->mpwqe_pkts++;
}

static struct mlx5_wqe_ctrl_seg *mlx5e_tx_mpwqe_session_complete(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	u8 ds_count = session->ds_count;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5e_tx_wqe_info *wi;
	u16 pi;

	cseg = &session->wqe->ctrl;
	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);

	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	wi = &sq->db.wqe_info[pi];
	*wi = (struct mlx5e_tx_wqe_info) {
		.skb = NULL,
		.num_bytes = session->bytes_count,
		.num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS),
		.num_dma = session->pkt_count,
		.num_fifo_pkts = session->pkt_count,
	};

	sq->pc += wi->num_wqebbs;

	session->wqe = NULL;

	mlx5e_tx_check_stop(sq);

	return cseg;
}

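/* Add one linear skb to the current MPWQE session, opening or completing
 * sessions as needed, and ring the doorbell when required.
 */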
static void
mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		    struct mlx5_wqe_eth_seg *eseg, bool xmit_more)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5e_xmit_data txd;

	txd.data = skb->data;
	txd.len = skb->len;

	txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr)))
		goto err_unmap;

	if (!mlx5e_tx_mpwqe_session_is_active(sq)) {
		mlx5e_tx_mpwqe_session_start(sq, eseg);
	} else if (!mlx5e_tx_mpwqe_same_eseg(sq, eseg)) {
		mlx5e_tx_mpwqe_session_complete(sq);
		mlx5e_tx_mpwqe_session_start(sq, eseg);
	}

	sq->stats->xmit_more += xmit_more;

	mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE);
	mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb);
	mlx5e_tx_mpwqe_add_dseg(sq, &txd);
	mlx5e_tx_skb_update_hwts_flags(skb);

	if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe, sq->max_sq_mpw_wqebbs))) {
		/* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */
		cseg = mlx5e_tx_mpwqe_session_complete(sq);

		if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more))
			mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
	} else if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more)) {
		/* Might stop the queue, but we were asked to ring the doorbell anyway. */
		cseg = mlx5e_tx_mpwqe_session_complete(sq);

		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
	}

	return;

err_unmap:
	mlx5e_dma_unmap_wqe_err(sq, 1);
	sq->stats->dropped++;
	dev_kfree_skb_any(skb);
	mlx5e_tx_flush(sq);
}

void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
{
	/* Unlikely in non-MPWQE workloads; not important in MPWQE workloads. */
	if (unlikely(mlx5e_tx_mpwqe_session_is_active(sq)))
		mlx5e_tx_mpwqe_session_complete(sq);
}

static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
				 struct mlx5_wqe_eth_seg *eseg)
{
	if (ptpsq->ts_cqe_ctr_mask && unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		eseg->flow_table_metadata = cpu_to_be32(ptpsq->skb_fifo_pc &
							ptpsq->ts_cqe_ctr_mask);
}

static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
				   struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
				   struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
	mlx5e_accel_tx_eseg(priv, skb, eseg, ihs);
	mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
	if (unlikely(sq->ptpsq))
		mlx5e_cqe_ts_id_eseg(sq->ptpsq, skb, eseg);
}

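/* Main transmit entry point (the driver's ndo_start_xmit). */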
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_accel_tx_state accel = {};
	struct mlx5e_tx_wqe_attr wqe_attr;
	struct mlx5e_tx_attr attr;
	struct mlx5e_tx_wqe *wqe;
	struct mlx5e_txqsq *sq;
	u16 pi;

	/* All changes to txq2sq are performed in sync with mlx5e_xmit, when the
	 * queue being changed is disabled, and smp_wmb guarantees that the
	 * changes are visible before mlx5e_xmit tries to read from txq2sq. It
	 * guarantees that the value of txq2sq[qid] doesn't change while
	 * mlx5e_xmit is running on queue number qid. smp_wmb is paired with
	 * HARD_TX_LOCK around ndo_start_xmit, which serves as an ACQUIRE.
	 */
	sq = priv->txq2sq[skb_get_queue_mapping(skb)];
	if (unlikely(!sq)) {
		/* Two cases when sq can be NULL:
		 * 1. The HTB node is registered, and mlx5e_select_queue
		 * selected its queue ID, but the SQ itself is not yet created.
		 * 2. HTB SQ creation failed. Similar to the previous case, but
		 * the SQ won't be created.
		 */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* May send SKBs and WQEs. */
	if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel)))
		return NETDEV_TX_OK;

	mlx5e_sq_xmit_prepare(sq, skb, &accel, &attr);

	if (test_bit(MLX5E_SQ_STATE_MPWQE, &sq->state)) {
		if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
			struct mlx5_wqe_eth_seg eseg = {};

			mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg, attr.ihs);
			mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
			return NETDEV_TX_OK;
		}

		mlx5e_tx_mpwqe_ensure_complete(sq);
	}

	mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
	pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);

	/* May update the WQE, but may not post other WQEs. */
	mlx5e_accel_tx_finish(sq, wqe, &accel,
			      (struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
	mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs);
	mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());

	return NETDEV_TX_OK;
}

static void mlx5e_tx_wi_dma_unmap(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
				  u32 *dma_fifo_cc)
{
	int i;

	for (i = 0; i < wi->num_dma; i++) {
		struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);

		mlx5e_tx_dma_unmap(sq->pdev, dma);
	}
}

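/* Deliver the HW timestamp to the stack if it was requested, then release
 * the skb.
 */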
static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			      struct mlx5_cqe64 *cqe, int napi_budget)
{
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct skb_shared_hwtstamps hwts = {};
		u64 ts = get_cqe_ts(cqe);

		hwts.hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, ts);
		if (sq->ptpsq)
			mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP,
						      hwts.hwtstamp, sq->ptpsq->cq_stats);
		else
			skb_tstamp_tx(skb, &hwts);
	}

	napi_consume_skb(skb, napi_budget);
}

static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
					  struct mlx5_cqe64 *cqe, int napi_budget)
{
	int i;

	for (i = 0; i < wi->num_fifo_pkts; i++) {
		struct sk_buff *skb = mlx5e_skb_fifo_pop(&sq->db.skb_fifo);

		mlx5e_consume_skb(sq, skb, cqe, napi_budget);
	}
}

void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq)
{
	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
	    mlx5e_ptpsq_fifo_has_room(sq) &&
	    !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
		netif_tx_wake_queue(sq->txq);
		sq->stats->wake++;
	}
}

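/* Poll TX completions: reclaim DMA mappings and skbs for completed WQEs,
 * report error CQEs, update BQL and wake the queue if it was stopped.
 */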
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
	struct mlx5e_sq_stats *stats;
	struct mlx5e_txqsq *sq;
	struct mlx5_cqe64 *cqe;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_txqsq, cq);

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return false;

	stats = sq->stats;

	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;

	i = 0;
	do {
		struct mlx5e_tx_wqe_info *wi;
		u16 wqe_counter;
		bool last_wqe;
		u16 ci;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			last_wqe = (sqcc == wqe_counter);

			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
			wi = &sq->db.wqe_info[ci];

			sqcc += wi->num_wqebbs;

			if (likely(wi->skb)) {
				mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
				mlx5e_consume_skb(sq, wi->skb, cqe, napi_budget);

				npkts++;
				nbytes += wi->num_bytes;
				continue;
			}

			if (unlikely(mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi,
									       &dma_fifo_cc)))
				continue;

			if (wi->num_fifo_pkts) {
				mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
				mlx5e_tx_wi_consume_fifo_skbs(sq, wi, cqe, napi_budget);

				npkts += wi->num_fifo_pkts;
				nbytes += wi->num_bytes;
			}
		} while (!last_wqe);

		if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
					      &sq->state)) {
				mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
						     (struct mlx5_err_cqe *)cqe);
				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
				queue_work(cq->priv->wq, &sq->recover_work);
			}
			stats->cqe_err++;
		}

	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

	stats->cqes += i;

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	mlx5e_txqsq_wake(sq);

	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

static void mlx5e_tx_wi_kfree_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi)
{
	int i;

	for (i = 0; i < wi->num_fifo_pkts; i++)
		dev_kfree_skb_any(mlx5e_skb_fifo_pop(&sq->db.skb_fifo));
}

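/* Drop all outstanding WQEs on the SQ without waiting for completions,
 * freeing their DMA mappings and skbs.
 */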
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_wqe_info *wi;
	u32 dma_fifo_cc, nbytes = 0;
	u16 ci, sqcc, npkts = 0;

	sqcc = sq->cc;
	dma_fifo_cc = sq->dma_fifo_cc;

	while (sqcc != sq->pc) {
		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
		wi = &sq->db.wqe_info[ci];

		sqcc += wi->num_wqebbs;

		if (likely(wi->skb)) {
			mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
			dev_kfree_skb_any(wi->skb);

			npkts++;
			nbytes += wi->num_bytes;
			continue;
		}

		if (unlikely(mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi, &dma_fifo_cc)))
			continue;

		if (wi->num_fifo_pkts) {
			mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
			mlx5e_tx_wi_kfree_fifo_skbs(sq, wi);

			npkts += wi->num_fifo_pkts;
			nbytes += wi->num_bytes;
		}
	}

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);
}

#ifdef CONFIG_MLX5_CORE_IPOIB
static inline void
mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
			   struct mlx5_wqe_datagram_seg *dseg)
{
	memcpy(&dseg->av, av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}

static void mlx5i_sq_calc_wqe_attr(struct sk_buff *skb,
				   const struct mlx5e_tx_attr *attr,
				   struct mlx5e_tx_wqe_attr *wqe_attr)
{
	u16 ds_cnt = sizeof(struct mlx5i_tx_wqe) / MLX5_SEND_WQE_DS;
	u16 ds_cnt_inl = 0;

	ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags;

	if (attr->ihs) {
		u16 inl = attr->ihs - INL_HDR_START_SZ;

		ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
		ds_cnt += ds_cnt_inl;
	}

	*wqe_attr = (struct mlx5e_tx_wqe_attr) {
		.ds_cnt     = ds_cnt,
		.ds_cnt_inl = ds_cnt_inl,
		.num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS),
	};
}

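/* IPoIB (mlx5i) transmit path: like mlx5e_sq_xmit_wqe(), but with an extra
 * datagram segment carrying the address vector, destination QPN and Q_Key.
 */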
void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		   struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more)
{
	struct mlx5e_tx_wqe_attr wqe_attr;
	struct mlx5e_tx_attr attr;
	struct mlx5i_tx_wqe *wqe;

	struct mlx5_wqe_datagram_seg *datagram;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_eth_seg  *eseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe_info *wi;

	struct mlx5e_sq_stats *stats = sq->stats;
	int num_dma;
	u16 pi;

	mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
	mlx5i_sq_calc_wqe_attr(skb, &attr, &wqe_attr);

	pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
	wqe = MLX5I_SQ_FETCH_WQE(sq, pi);

	stats->xmit_more += xmit_more;

	/* fill wqe */
	wi       = &sq->db.wqe_info[pi];
	cseg     = &wqe->ctrl;
	datagram = &wqe->datagram;
	eseg     = &wqe->eth;
	dseg     =  wqe->data;

	mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);

	mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, eseg);

	eseg->mss = attr.mss;

	if (attr.ihs) {
		if (unlikely(attr.hopbyhop)) {
			struct ipv6hdr *h6;

			/* remove the HBH header.
			 * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
			 */
			unsafe_memcpy(eseg->inline_hdr.start, skb->data,
				      ETH_HLEN + sizeof(*h6),
				      MLX5_UNSAFE_MEMCPY_DISCLAIMER);
			h6 = (struct ipv6hdr *)((char *)eseg->inline_hdr.start + ETH_HLEN);
			h6->nexthdr = IPPROTO_TCP;
			/* Copy the TCP header after the IPv6 one */
			unsafe_memcpy(h6 + 1,
				      skb->data + ETH_HLEN + sizeof(*h6) +
						  sizeof(struct hop_jumbo_hdr),
				      tcp_hdrlen(skb),
				      MLX5_UNSAFE_MEMCPY_DISCLAIMER);
			/* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
		} else {
			unsafe_memcpy(eseg->inline_hdr.start, skb->data,
				      attr.ihs,
				      MLX5_UNSAFE_MEMCPY_DISCLAIMER);
		}
		eseg->inline_hdr.sz = cpu_to_be16(attr.ihs);
		dseg += wqe_attr.ds_cnt_inl;
	}

	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs + attr.hopbyhop,
					  attr.headlen, dseg);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, &attr, &wqe_attr, num_dma, wi, cseg, xmit_more);

	return;

err_drop:
	stats->dropped++;
	dev_kfree_skb_any(skb);
	mlx5e_tx_flush(sq);
}
#endif