// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.

#include <linux/debugfs.h>
#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"

struct mlx5e_dump_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_data_seg data;
};

#define MLX5E_KTLS_DUMP_WQEBBS \
	(DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))

static u8
mlx5e_ktls_dumps_num_wqes(struct mlx5e_params *params, unsigned int nfrags,
			  unsigned int sync_len)
{
	/* Given the MTU and sync_len, calculates an upper bound for the
	 * number of DUMP WQEs needed for the TX resync of a record.
	 */
	return nfrags + DIV_ROUND_UP(sync_len, MLX5E_SW2HW_MTU(params, params->sw_mtu));
}

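/* Reserve enough SQ "stop room" for the worst-case kTLS transmission:
 * one SET_STATIC_PARAMS WQE, one SET_PROGRESS_PARAMS WQE, a DUMP WQE per
 * fragment of a maximal resync (see mlx5e_ktls_dumps_num_wqes()), and a
 * trailing fence NOP.
 */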
u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	u16 num_dumps, stop_room = 0;

	if (!mlx5e_is_ktls_tx(mdev))
		return 0;

	num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);

	stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
	stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
	stop_room += num_dumps * mlx5e_stop_room_for_wqe(mdev, MLX5E_KTLS_DUMP_WQEBBS);
	stop_room += 1; /* fence nop */

	return stop_room;
}

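/* Each offloaded connection gets its own TIS (transport interface send)
 * object, created with tls_en set so the device treats traffic posted on it
 * as TLS offloaded. Creation/destruction is done either synchronously or via
 * the async command interface, which the recycling pool below uses for bulk
 * operations.
 */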
static void mlx5e_ktls_set_tisc(struct mlx5_core_dev *mdev, void *tisc)
{
	MLX5_SET(tisc, tisc, tls_en, 1);
	MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn);
	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
}

static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};

	mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));

	return mlx5_core_create_tis(mdev, in, tisn);
}

static int mlx5e_ktls_create_tis_cb(struct mlx5_core_dev *mdev,
				    struct mlx5_async_ctx *async_ctx,
				    u32 *out, int outlen,
				    mlx5_async_cbk_t callback,
				    struct mlx5_async_work *context)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};

	mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));
	MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);

	return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in),
				out, outlen, callback, context);
}

static int mlx5e_ktls_destroy_tis_cb(struct mlx5_core_dev *mdev, u32 tisn,
				     struct mlx5_async_ctx *async_ctx,
				     u32 *out, int outlen,
				     mlx5_async_cbk_t callback,
				     struct mlx5_async_work *context)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};

	MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
	MLX5_SET(destroy_tis_in, in, tisn, tisn);

	return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in),
				out, outlen, callback, context);
}

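/* Per-connection TX offload state. A pointer to this struct is stored in the
 * tls_context driver state (see mlx5e_set_ktls_tx_priv_ctx()), hence the
 * BUILD_BUG_ON against TLS_DRIVER_STATE_SIZE_TX below.
 */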
struct mlx5e_ktls_offload_context_tx {
	/* fast path */
	u32 expected_seq;
	u32 tisn;
	bool ctx_post_pending;
	/* control / resync */
	struct list_head list_node; /* member of the pool */
	union mlx5e_crypto_info crypto_info;
	struct tls_offload_context_tx *tx_ctx;
	struct mlx5_core_dev *mdev;
	struct mlx5e_tls_sw_stats *sw_stats;
	struct mlx5_crypto_dek *dek;
	u8 create_err : 1;
};

static void
mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx,
			   struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	struct mlx5e_ktls_offload_context_tx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);

	BUILD_BUG_ON(sizeof(priv_tx) > TLS_DRIVER_STATE_SIZE_TX);

	*ctx = priv_tx;
}

static struct mlx5e_ktls_offload_context_tx *
mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_tx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);

	return *ctx;
}

/* struct for callback API management */
struct mlx5e_async_ctx {
	struct mlx5_async_work context;
	struct mlx5_async_ctx *async_ctx;
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	int err;
	union {
		u32 out_create[MLX5_ST_SZ_DW(create_tis_out)];
		u32 out_destroy[MLX5_ST_SZ_DW(destroy_tis_out)];
	};
};

struct mlx5e_bulk_async_ctx {
	struct mlx5_async_ctx async_ctx;
	DECLARE_FLEX_ARRAY(struct mlx5e_async_ctx, arr);
};

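/* Allocate one command async context plus @n per-object work entries, so a
 * bulk of TIS create/destroy commands can be issued in parallel and then
 * waited for together in mlx5e_bulk_async_cleanup().
 */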
static struct mlx5e_bulk_async_ctx *mlx5e_bulk_async_init(struct mlx5_core_dev *mdev, int n)
{
	struct mlx5e_bulk_async_ctx *bulk_async;
	int sz;
	int i;

	sz = struct_size(bulk_async, arr, n);
	bulk_async = kvzalloc(sz, GFP_KERNEL);
	if (!bulk_async)
		return NULL;

	mlx5_cmd_init_async_ctx(mdev, &bulk_async->async_ctx);

	for (i = 0; i < n; i++)
		bulk_async->arr[i].async_ctx = &bulk_async->async_ctx;

	return bulk_async;
}

static void mlx5e_bulk_async_cleanup(struct mlx5e_bulk_async_ctx *bulk_async)
{
	mlx5_cmd_cleanup_async_ctx(&bulk_async->async_ctx);
	kvfree(bulk_async);
}

static void create_tis_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5e_async_ctx *async =
		container_of(context, struct mlx5e_async_ctx, context);
	struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;

	if (status) {
		async->err = status;
		priv_tx->create_err = 1;
		return;
	}

	priv_tx->tisn = MLX5_GET(create_tis_out, async->out_create, tisn);
}

static void destroy_tis_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5e_async_ctx *async =
		container_of(context, struct mlx5e_async_ctx, context);
	struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;

	kfree(priv_tx);
}

static struct mlx5e_ktls_offload_context_tx *
mlx5e_tls_priv_tx_init(struct mlx5_core_dev *mdev, struct mlx5e_tls_sw_stats *sw_stats,
		       struct mlx5e_async_ctx *async)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	int err;

	priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL);
	if (!priv_tx)
		return ERR_PTR(-ENOMEM);

	priv_tx->mdev = mdev;
	priv_tx->sw_stats = sw_stats;

	if (!async) {
		err = mlx5e_ktls_create_tis(mdev, &priv_tx->tisn);
		if (err)
			goto err_out;
	} else {
		async->priv_tx = priv_tx;
		err = mlx5e_ktls_create_tis_cb(mdev, async->async_ctx,
					       async->out_create, sizeof(async->out_create),
					       create_tis_callback, &async->context);
		if (err)
			goto err_out;
	}

	return priv_tx;

err_out:
	kfree(priv_tx);
	return ERR_PTR(err);
}

static void mlx5e_tls_priv_tx_cleanup(struct mlx5e_ktls_offload_context_tx *priv_tx,
				      struct mlx5e_async_ctx *async)
{
	if (priv_tx->create_err) {
		kfree(priv_tx);
		return;
	}
	async->priv_tx = priv_tx;
	mlx5e_ktls_destroy_tis_cb(priv_tx->mdev, priv_tx->tisn,
				  async->async_ctx,
				  async->out_destroy, sizeof(async->out_destroy),
				  destroy_tis_callback, &async->context);
}

static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
					   struct list_head *list, int size)
{
	struct mlx5e_ktls_offload_context_tx *obj, *n;
	struct mlx5e_bulk_async_ctx *bulk_async;
	int i;

	bulk_async = mlx5e_bulk_async_init(mdev, size);
	if (!bulk_async)
		return;

	i = 0;
	list_for_each_entry_safe(obj, n, list, list_node) {
		mlx5e_tls_priv_tx_cleanup(obj, &bulk_async->arr[i]);
		i++;
	}

	mlx5e_bulk_async_cleanup(bulk_async);
}

/* Recycling pool API */

#define MLX5E_TLS_TX_POOL_BULK (16)
#define MLX5E_TLS_TX_POOL_HIGH (4 * 1024)
#define MLX5E_TLS_TX_POOL_LOW (MLX5E_TLS_TX_POOL_HIGH / 4)

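/* The pool recycles TIS objects across connections. Background works keep its
 * size between the LOW and HIGH watermarks, adding or removing
 * MLX5E_TLS_TX_POOL_BULK objects at a time.
 */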
struct mlx5e_tls_tx_pool {
	struct mlx5_core_dev *mdev;
	struct mlx5e_tls_sw_stats *sw_stats;
	struct mutex lock; /* Protects access to the pool */
	struct list_head list;
	size_t size;

	struct workqueue_struct *wq;
	struct work_struct create_work;
	struct work_struct destroy_work;
};

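/* Refill work: asynchronously create a bulk of TIS objects and splice them
 * into the pool. Re-queues itself while the pool is still at or below the LOW
 * watermark.
 */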
static void create_work(struct work_struct *work)
{
	struct mlx5e_tls_tx_pool *pool =
		container_of(work, struct mlx5e_tls_tx_pool, create_work);
	struct mlx5e_ktls_offload_context_tx *obj;
	struct mlx5e_bulk_async_ctx *bulk_async;
	LIST_HEAD(local_list);
	int i, j, err = 0;

	bulk_async = mlx5e_bulk_async_init(pool->mdev, MLX5E_TLS_TX_POOL_BULK);
	if (!bulk_async)
		return;

	for (i = 0; i < MLX5E_TLS_TX_POOL_BULK; i++) {
		obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, &bulk_async->arr[i]);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}
		list_add(&obj->list_node, &local_list);
	}

	for (j = 0; j < i; j++) {
		struct mlx5e_async_ctx *async = &bulk_async->arr[j];

		if (!err && async->err)
			err = async->err;
	}
	atomic64_add(i, &pool->sw_stats->tx_tls_pool_alloc);
	mlx5e_bulk_async_cleanup(bulk_async);
	if (err)
		goto err_out;

	mutex_lock(&pool->lock);
	if (pool->size + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH) {
		mutex_unlock(&pool->lock);
		goto err_out;
	}
	list_splice(&local_list, &pool->list);
	pool->size += MLX5E_TLS_TX_POOL_BULK;
	if (pool->size <= MLX5E_TLS_TX_POOL_LOW)
		queue_work(pool->wq, work);
	mutex_unlock(&pool->lock);
	return;

err_out:
	mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, i);
	atomic64_add(i, &pool->sw_stats->tx_tls_pool_free);
}

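/* Drain work: destroy one bulk of TIS objects when the pool has reached the
 * HIGH watermark, re-queueing itself while the pool is still too large.
 */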
static void destroy_work(struct work_struct *work)
{
	struct mlx5e_tls_tx_pool *pool =
		container_of(work, struct mlx5e_tls_tx_pool, destroy_work);
	struct mlx5e_ktls_offload_context_tx *obj;
	LIST_HEAD(local_list);
	int i = 0;

	mutex_lock(&pool->lock);
	if (pool->size < MLX5E_TLS_TX_POOL_HIGH) {
		mutex_unlock(&pool->lock);
		return;
	}

	list_for_each_entry(obj, &pool->list, list_node)
		if (++i == MLX5E_TLS_TX_POOL_BULK)
			break;

	list_cut_position(&local_list, &pool->list, &obj->list_node);
	pool->size -= MLX5E_TLS_TX_POOL_BULK;
	if (pool->size >= MLX5E_TLS_TX_POOL_HIGH)
		queue_work(pool->wq, work);
	mutex_unlock(&pool->lock);

	mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, MLX5E_TLS_TX_POOL_BULK);
	atomic64_add(MLX5E_TLS_TX_POOL_BULK, &pool->sw_stats->tx_tls_pool_free);
}

static struct mlx5e_tls_tx_pool *mlx5e_tls_tx_pool_init(struct mlx5_core_dev *mdev,
							struct mlx5e_tls_sw_stats *sw_stats)
{
	struct mlx5e_tls_tx_pool *pool;

	BUILD_BUG_ON(MLX5E_TLS_TX_POOL_LOW + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH);

	pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->wq = create_singlethread_workqueue("mlx5e_tls_tx_pool");
	if (!pool->wq)
		goto err_free;

	INIT_LIST_HEAD(&pool->list);
	mutex_init(&pool->lock);

	INIT_WORK(&pool->create_work, create_work);
	INIT_WORK(&pool->destroy_work, destroy_work);

	pool->mdev = mdev;
	pool->sw_stats = sw_stats;

	return pool;

err_free:
	kvfree(pool);
	return NULL;
}

static void mlx5e_tls_tx_pool_list_cleanup(struct mlx5e_tls_tx_pool *pool)
{
	while (pool->size > MLX5E_TLS_TX_POOL_BULK) {
		struct mlx5e_ktls_offload_context_tx *obj;
		LIST_HEAD(local_list);
		int i = 0;

		list_for_each_entry(obj, &pool->list, list_node)
			if (++i == MLX5E_TLS_TX_POOL_BULK)
				break;

		list_cut_position(&local_list, &pool->list, &obj->list_node);
		mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, MLX5E_TLS_TX_POOL_BULK);
		atomic64_add(MLX5E_TLS_TX_POOL_BULK, &pool->sw_stats->tx_tls_pool_free);
		pool->size -= MLX5E_TLS_TX_POOL_BULK;
	}
	if (pool->size) {
		mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &pool->list, pool->size);
		atomic64_add(pool->size, &pool->sw_stats->tx_tls_pool_free);
	}
}

static void mlx5e_tls_tx_pool_cleanup(struct mlx5e_tls_tx_pool *pool)
{
	mlx5e_tls_tx_pool_list_cleanup(pool);
	destroy_workqueue(pool->wq);
	kvfree(pool);
}

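/* pool_push()/pool_pop() are called from the connection add/del paths. They
 * trigger the destroy/create works at the HIGH/LOW watermarks; an empty pool
 * falls back to synchronous TIS creation in pool_pop().
 */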
static void pool_push(struct mlx5e_tls_tx_pool *pool, struct mlx5e_ktls_offload_context_tx *obj)
{
	mutex_lock(&pool->lock);
	list_add(&obj->list_node, &pool->list);
	if (++pool->size == MLX5E_TLS_TX_POOL_HIGH)
		queue_work(pool->wq, &pool->destroy_work);
	mutex_unlock(&pool->lock);
}

static struct mlx5e_ktls_offload_context_tx *pool_pop(struct mlx5e_tls_tx_pool *pool)
{
	struct mlx5e_ktls_offload_context_tx *obj;

	mutex_lock(&pool->lock);
	if (unlikely(pool->size == 0)) {
		/* pool is empty:
		 * - trigger the populating work, and
		 * - serve the current context via the regular blocking api.
		 */
		queue_work(pool->wq, &pool->create_work);
		mutex_unlock(&pool->lock);
		obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, NULL);
		if (!IS_ERR(obj))
			atomic64_inc(&pool->sw_stats->tx_tls_pool_alloc);
		return obj;
	}

	obj = list_first_entry(&pool->list, struct mlx5e_ktls_offload_context_tx,
			       list_node);
	list_del(&obj->list_node);
	if (--pool->size == MLX5E_TLS_TX_POOL_LOW)
		queue_work(pool->wq, &pool->create_work);
	mutex_unlock(&pool->lock);
	return obj;
}

/* End of pool API */

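/* .tls_dev_add handler for TX: take a (possibly pre-created) TIS from the
 * pool, program a DEK with the connection key and stash the crypto info.
 * The static/progress params WQEs are posted lazily from the TX datapath,
 * gated by ctx_post_pending.
 */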
int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
		      struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_tls_tx_pool *pool;
	struct tls_context *tls_ctx;
	struct mlx5_crypto_dek *dek;
	struct mlx5e_priv *priv;
	int err;

	tls_ctx = tls_get_ctx(sk);
	priv = netdev_priv(netdev);
	pool = priv->tls->tx_pool;

	priv_tx = pool_pop(pool);
	if (IS_ERR(priv_tx))
		return PTR_ERR(priv_tx);

	dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
	if (IS_ERR(dek)) {
		err = PTR_ERR(dek);
		goto err_create_key;
	}
	priv_tx->dek = dek;

	priv_tx->expected_seq = start_offload_tcp_sn;
	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		priv_tx->crypto_info.crypto_info_128 =
			*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	case TLS_CIPHER_AES_GCM_256:
		priv_tx->crypto_info.crypto_info_256 =
			*(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
		break;
	default:
		WARN_ONCE(1, "Unsupported cipher type %u\n",
			  crypto_info->cipher_type);
		/* Returning here directly would leak both the DEK created above
		 * and the pool object; unwind through the error labels instead.
		 */
		err = -EOPNOTSUPP;
		goto err_cipher_type;
	}
	priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx);

	mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);

	priv_tx->ctx_post_pending = true;
	atomic64_inc(&priv_tx->sw_stats->tx_tls_ctx);

	return 0;

err_cipher_type:
	mlx5_ktls_destroy_key(priv->tls->dek_pool, priv_tx->dek);
err_create_key:
	pool_push(pool, priv_tx);
	return err;
}

void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_tls_tx_pool *pool;
	struct mlx5e_priv *priv;

	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
	priv = netdev_priv(netdev);
	pool = priv->tls->tx_pool;

	atomic64_inc(&priv_tx->sw_stats->tx_tls_del);
	mlx5_ktls_destroy_key(priv->tls->dek_pool, priv_tx->dek);
	pool_push(pool, priv_tx);
}

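/* Record bookkeeping info for a WQE posted by the kTLS code itself, so the
 * completion path accounts its WQEBBs/bytes and, for DUMP WQEs, releases the
 * page reference (see mlx5e_ktls_tx_handle_resync_dump_comp()).
 */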
static void tx_fill_wi(struct mlx5e_txqsq *sq,
		       u16 pi, u8 num_wqebbs, u32 num_bytes,
		       struct page *page)
{
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	*wi = (struct mlx5e_tx_wqe_info) {
		.num_wqebbs = num_wqebbs,
		.num_bytes  = num_bytes,
		.resync_dump_frag_page = page,
	};
}

static bool
mlx5e_ktls_tx_offload_test_and_clear_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	bool ret = priv_tx->ctx_post_pending;

	priv_tx->ctx_post_pending = false;

	return ret;
}

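/* Post the SET_STATIC_PARAMS and SET_PROGRESS_PARAMS WQEs that program the
 * connection's crypto state (DEK id, crypto info, progress tracking) into the
 * TIS. The WQE builders are declared in en_accel/ktls_txrx.h.
 */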
static void
post_static_params(struct mlx5e_txqsq *sq,
		   struct mlx5e_ktls_offload_context_tx *priv_tx,
		   bool fence)
{
	struct mlx5e_set_tls_static_params_wqe *wqe;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
	pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_tx->crypto_info,
				       priv_tx->tisn,
				       mlx5_crypto_dek_get_id(priv_tx->dek),
				       0, fence, TLS_OFFLOAD_CTX_DIR_TX);
	tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
	sq->pc += num_wqebbs;
}

static void
post_progress_params(struct mlx5e_txqsq *sq,
		     struct mlx5e_ktls_offload_context_tx *priv_tx,
		     bool fence)
{
	struct mlx5e_set_tls_progress_params_wqe *wqe;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
	pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_tx->tisn, fence, 0,
					 TLS_OFFLOAD_CTX_DIR_TX);
	tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
	sq->pc += num_wqebbs;
}

static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

	tx_fill_wi(sq, pi, 1, 0, NULL);

	mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
}

static void
mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
			      struct mlx5e_ktls_offload_context_tx *priv_tx,
			      bool skip_static_post, bool fence_first_post)
{
	bool progress_fence = skip_static_post || !fence_first_post;

	if (!skip_static_post)
		post_static_params(sq, priv_tx, fence_first_post);

	post_progress_params(sq, priv_tx, progress_fence);
	tx_post_fence_nop(sq);
}

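/* TX resync (out-of-order / retransmission) support: when an skb's TCP
 * sequence does not match the expected sequence, the driver re-posts the
 * params WQEs and DUMPs the already-transmitted part of the current TLS
 * record (see tx_sync_info_get() and tx_post_resync_dump()) so the device
 * can catch up on the record state before the skb is sent.
 */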
struct tx_sync_info {
	u64 rcd_sn;
	u32 sync_len;
	int nr_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

enum mlx5e_ktls_sync_retval {
	MLX5E_KTLS_SYNC_DONE,
	MLX5E_KTLS_SYNC_FAIL,
	MLX5E_KTLS_SYNC_SKIP_NO_DATA,
};

static enum mlx5e_ktls_sync_retval
tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
		 u32 tcp_seq, int datalen, struct tx_sync_info *info)
{
	struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
	enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
	struct tls_record_info *record;
	int remaining, i = 0;
	unsigned long flags;
	bool ends_before;

	spin_lock_irqsave(&tx_ctx->lock, flags);
	record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);

	if (unlikely(!record)) {
		ret = MLX5E_KTLS_SYNC_FAIL;
		goto out;
	}

	/* There are the following cases:
	 * 1. packet ends before start marker: bypass offload.
	 * 2. packet starts before start marker and ends after it: drop,
	 *    not supported, breaks contract with kernel.
	 * 3. packet ends before tls record info starts: drop,
	 *    this packet was already acknowledged and its record info
	 *    was released.
	 */
	ends_before = before(tcp_seq + datalen - 1, tls_record_start_seq(record));

	if (unlikely(tls_record_is_start_marker(record))) {
		ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
		goto out;
	} else if (ends_before) {
		ret = MLX5E_KTLS_SYNC_FAIL;
		goto out;
	}

	info->sync_len = tcp_seq - tls_record_start_seq(record);
	remaining = info->sync_len;
	while (remaining > 0) {
		skb_frag_t *frag = &record->frags[i];

		get_page(skb_frag_page(frag));
		remaining -= skb_frag_size(frag);
		info->frags[i++] = *frag;
	}
	/* reduce the part which will be sent with the original SKB */
	if (remaining < 0)
		skb_frag_size_add(&info->frags[i - 1], remaining);
	info->nr_frags = i;
out:
	spin_unlock_irqrestore(&tx_ctx->lock, flags);
	return ret;
}

static void
tx_post_resync_params(struct mlx5e_txqsq *sq,
		      struct mlx5e_ktls_offload_context_tx *priv_tx,
		      u64 rcd_sn)
{
	__be64 rn_be = cpu_to_be64(rcd_sn);
	bool skip_static_post;
	u16 rec_seq_sz;
	char *rec_seq;

	switch (priv_tx->crypto_info.crypto_info.cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info.crypto_info_128;

		rec_seq = info->rec_seq;
		rec_seq_sz = sizeof(info->rec_seq);
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *info = &priv_tx->crypto_info.crypto_info_256;

		rec_seq = info->rec_seq;
		rec_seq_sz = sizeof(info->rec_seq);
		break;
	}
	default:
		WARN_ONCE(1, "Unsupported cipher type %u\n",
			  priv_tx->crypto_info.crypto_info.cipher_type);
		return;
	}

	skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
	if (!skip_static_post)
		memcpy(rec_seq, &rn_be, rec_seq_sz);

	mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
}

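/* Post a single DUMP WQE for one fragment of the record being resynced. The
 * fragment is DMA-mapped and the page reference is held until the DUMP
 * completion (see mlx5e_ktls_tx_handle_resync_dump_comp()).
 */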
static int
tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_dump_wqe *wqe;
	dma_addr_t dma_addr = 0;
	u16 ds_cnt;
	int fsz;
	u16 pi;

	BUILD_BUG_ON(MLX5E_KTLS_DUMP_WQEBBS != 1);
	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	wqe = MLX5E_TLS_FETCH_DUMP_WQE(sq, pi);

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;

	cseg = &wqe->ctrl;
	dseg = &wqe->data;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8)  | MLX5_OPCODE_DUMP);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	cseg->tis_tir_num      = cpu_to_be32(tisn << 8);

	fsz = skb_frag_size(frag);
	dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
				    DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
		return -ENOMEM;

	dseg->addr       = cpu_to_be64(dma_addr);
	dseg->lkey       = sq->mkey_be;
	dseg->byte_count = cpu_to_be32(fsz);
	mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);

	tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
	sq->pc += MLX5E_KTLS_DUMP_WQEBBS;

	return 0;
}

void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
					   struct mlx5e_tx_wqe_info *wi,
					   u32 *dma_fifo_cc)
{
	struct mlx5e_sq_stats *stats;
	struct mlx5e_sq_dma *dma;

	dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
	stats = sq->stats;

	mlx5e_tx_dma_unmap(sq->pdev, dma);
	put_page(wi->resync_dump_frag_page);
	stats->tls_dump_packets++;
	stats->tls_dump_bytes += wi->num_bytes;
}

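/* Handle an skb whose TCP sequence does not match the expected one: fetch the
 * record info, re-post the params WQEs, then DUMP the record prefix in
 * MTU-sized chunks, taking an extra page reference per chunk beyond the one
 * obtained in tx_sync_info_get().
 */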
static enum mlx5e_ktls_sync_retval
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
			 struct mlx5e_txqsq *sq,
			 int datalen,
			 u32 seq)
{
	enum mlx5e_ktls_sync_retval ret;
	struct tx_sync_info info = {};
	int i;

	ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
	if (unlikely(ret != MLX5E_KTLS_SYNC_DONE))
		/* We might get here with ret == FAIL if a retransmission
		 * reaches the driver after the relevant record is acked.
		 * It should be safe to drop the packet in this case
		 */
		return ret;

	tx_post_resync_params(sq, priv_tx, info.rcd_sn);

	for (i = 0; i < info.nr_frags; i++) {
		unsigned int orig_fsz, frag_offset = 0, n = 0;
		skb_frag_t *f = &info.frags[i];

		orig_fsz = skb_frag_size(f);

		do {
			unsigned int fsz;

			n++;
			fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
			skb_frag_size_set(f, fsz);
			if (tx_post_resync_dump(sq, f, priv_tx->tisn)) {
				page_ref_add(skb_frag_page(f), n - 1);
				goto err_out;
			}

			skb_frag_off_add(f, fsz);
			frag_offset += fsz;
		} while (frag_offset < orig_fsz);

		page_ref_add(skb_frag_page(f), n - 1);
	}

	return MLX5E_KTLS_SYNC_DONE;

err_out:
	for (; i < info.nr_frags; i++)
		/* The put_page() here undoes the page ref obtained in tx_sync_info_get().
		 * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be
		 * released only upon their completions (or in mlx5e_free_txqsq_descs,
		 * if channel closes).
		 */
		put_page(skb_frag_page(&info.frags[i]));

	return MLX5E_KTLS_SYNC_FAIL;
}

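/* Datapath entry point, called from the TX path for TLS skbs. Returns true if
 * the skb should continue to regular transmission (with state->tls_tisn set),
 * false if it was consumed/dropped here.
 */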
bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
			      struct sk_buff *skb,
			      struct mlx5e_accel_tx_tls_state *state)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_sq_stats *stats = sq->stats;
	struct net_device *tls_netdev;
	struct tls_context *tls_ctx;
	int datalen;
	u32 seq;

	datalen = skb->len - skb_tcp_all_headers(skb);
	if (!datalen)
		return true;

	mlx5e_tx_mpwqe_ensure_complete(sq);

	tls_ctx = tls_get_ctx(skb->sk);
	tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
	/* Don't WARN on NULL: if tls_device_down is running in parallel,
	 * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was
	 * true. Rather continue processing this packet.
	 */
	if (WARN_ON_ONCE(tls_netdev && tls_netdev != netdev))
		goto err_out;

	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);

	if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx)))
		mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);

	seq = ntohl(tcp_hdr(skb)->seq);
	if (unlikely(priv_tx->expected_seq != seq)) {
		enum mlx5e_ktls_sync_retval ret =
			mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);

		stats->tls_ooo++;

		switch (ret) {
		case MLX5E_KTLS_SYNC_DONE:
			break;
		case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
			stats->tls_skip_no_sync_data++;
			if (likely(!skb->decrypted))
				goto out;
			WARN_ON_ONCE(1);
			goto err_out;
		case MLX5E_KTLS_SYNC_FAIL:
			stats->tls_drop_no_sync_data++;
			goto err_out;
		}
	}

	priv_tx->expected_seq = seq + datalen;

	state->tls_tisn = priv_tx->tisn;

	stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	stats->tls_encrypted_bytes   += datalen;

out:
	return true;

err_out:
	dev_kfree_skb_any(skb);
	return false;
}

static void mlx5e_tls_tx_debugfs_init(struct mlx5e_tls *tls,
				      struct dentry *dfs_root)
{
	if (IS_ERR_OR_NULL(dfs_root))
		return;

	tls->debugfs.dfs_tx = debugfs_create_dir("tx", dfs_root);

	debugfs_create_size_t("pool_size", 0400, tls->debugfs.dfs_tx,
			      &tls->tx_pool->size);
}

int mlx5e_ktls_init_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_tls *tls = priv->tls;

	if (!mlx5e_is_ktls_tx(priv->mdev))
		return 0;

	priv->tls->tx_pool = mlx5e_tls_tx_pool_init(priv->mdev, &priv->tls->sw_stats);
	if (!priv->tls->tx_pool)
		return -ENOMEM;

	mlx5e_tls_tx_debugfs_init(tls, tls->debugfs.dfs);

	return 0;
}

void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv)
{
	if (!mlx5e_is_ktls_tx(priv->mdev))
		return;

	debugfs_remove_recursive(priv->tls->debugfs.dfs_tx);
	priv->tls->debugfs.dfs_tx = NULL;

	mlx5e_tls_tx_pool_cleanup(priv->tls->tx_pool);
	priv->tls->tx_pool = NULL;
}