/*
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/mlx5/device.h>
#include "fpga/tls.h"
#include "fpga/cmd.h"
#include "fpga/sdk.h"
#include "fpga/core.h"
#include "accel/tls.h"

struct mlx5_fpga_tls_command_context;

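/* Completion callback for a TLS command. Called with the device's
 * response, or with resp == NULL when sending the command failed.
 */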
typedef void (*mlx5_fpga_tls_command_complete)
	(struct mlx5_fpga_conn *conn, struct mlx5_fpga_device *fdev,
	 struct mlx5_fpga_tls_command_context *ctx,
	 struct mlx5_fpga_dma_buf *resp);

struct mlx5_fpga_tls_command_context {
	struct list_head list;
	/* There is no guarantee on the order between the TX completion
	 * and the command response.
	 * The TX completion is going to touch cmd->buf even in
	 * the case of successful transmission.
	 * So instead of requiring separate allocations for cmd
	 * and cmd->buf, we use a reference counter.
	 */
	refcount_t ref;
	struct mlx5_fpga_dma_buf buf;
	mlx5_fpga_tls_command_complete complete;
};

static void
mlx5_fpga_tls_put_command_ctx(struct mlx5_fpga_tls_command_context *ctx)
{
	if (refcount_dec_and_test(&ctx->ref))
		kfree(ctx);
}

static void mlx5_fpga_tls_cmd_complete(struct mlx5_fpga_device *fdev,
				       struct mlx5_fpga_dma_buf *resp)
{
	struct mlx5_fpga_conn *conn = fdev->tls->conn;
	struct mlx5_fpga_tls_command_context *ctx;
	struct mlx5_fpga_tls *tls = fdev->tls;
	unsigned long flags;

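	/* Commands are posted to the QP and queued on pending_cmds in
	 * the same order, and responses are assumed to arrive in that
	 * order, so the response matches the oldest pending command.
	 */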
	spin_lock_irqsave(&tls->pending_cmds_lock, flags);
	ctx = list_first_entry(&tls->pending_cmds,
			       struct mlx5_fpga_tls_command_context, list);
	list_del(&ctx->list);
	spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);
	ctx->complete(conn, fdev, ctx, resp);
}

static void mlx5_fpga_cmd_send_complete(struct mlx5_fpga_conn *conn,
					struct mlx5_fpga_device *fdev,
					struct mlx5_fpga_dma_buf *buf,
					u8 status)
{
	struct mlx5_fpga_tls_command_context *ctx =
	    container_of(buf, struct mlx5_fpga_tls_command_context, buf);

	mlx5_fpga_tls_put_command_ctx(ctx);

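	/* A TX error means no response will arrive for this command;
	 * complete the oldest pending command with resp == NULL so its
	 * owner is not left waiting forever.
	 */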
	if (unlikely(status))
		mlx5_fpga_tls_cmd_complete(fdev, NULL);
}

static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev,
				   struct mlx5_fpga_tls_command_context *cmd,
				   mlx5_fpga_tls_command_complete complete)
{
	struct mlx5_fpga_tls *tls = fdev->tls;
	unsigned long flags;
	int ret;

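	/* One reference for the TX completion and one for the command
	 * response (see the comment in mlx5_fpga_tls_command_context).
	 */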
	refcount_set(&cmd->ref, 2);
	cmd->complete = complete;
	cmd->buf.complete = mlx5_fpga_cmd_send_complete;

	spin_lock_irqsave(&tls->pending_cmds_lock, flags);
	/* mlx5_fpga_sbu_conn_sendmsg is called under pending_cmds_lock
	 * to make sure commands are inserted into the tls->pending_cmds
	 * list and posted to the command QP in the same order.
	 */
	ret = mlx5_fpga_sbu_conn_sendmsg(tls->conn, &cmd->buf);
	if (likely(!ret))
		list_add_tail(&cmd->list, &tls->pending_cmds);
	else
		complete(tls->conn, fdev, cmd, NULL);
	spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);
}

/* Start of context identifiers range (inclusive) */
#define SWID_START	0
/* End of context identifiers range (exclusive) */
#define SWID_END	BIT(24)

static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
				    void *ptr)
{
	unsigned long flags;
	int ret;

	/* The TLS metadata format is 1 byte for the syndrome followed
	 * by 3 bytes of swid (software ID), so the swid must not exceed
	 * 3 bytes. See tls_rxtx.c:insert_pet() for details.
	 */
	BUILD_BUG_ON((SWID_END - 1) & 0xFF000000);

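	/* idr_preload() preallocates with GFP_KERNEL so that the
	 * idr_alloc() below can run under the spinlock without
	 * sleeping.
	 */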
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(idr_spinlock, flags);
	ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC);
	spin_unlock_irqrestore(idr_spinlock, flags);
	idr_preload_end();

	return ret;
}

static void *mlx5_fpga_tls_release_swid(struct idr *idr,
					spinlock_t *idr_spinlock, u32 swid)
{
	unsigned long flags;
	void *ptr;

	spin_lock_irqsave(idr_spinlock, flags);
	ptr = idr_remove(idr, swid);
	spin_unlock_irqrestore(idr_spinlock, flags);
	return ptr;
}

static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
				   struct mlx5_fpga_device *fdev,
				   struct mlx5_fpga_dma_buf *buf, u8 status)
{
	kfree(buf);
}

static void
mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
				  struct mlx5_fpga_device *fdev,
				  struct mlx5_fpga_tls_command_context *cmd,
				  struct mlx5_fpga_dma_buf *resp)
{
	if (resp) {
		u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);

		if (syndrome)
			mlx5_fpga_err(fdev,
				      "Teardown stream failed with syndrome = %d",
				      syndrome);
	}
	mlx5_fpga_tls_put_command_ctx(cmd);
}

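/* Copy the network portion of a tls_flow (everything up to the ipv6
 * field) into a tls_cmd, along with the ipv6 and direction_sx bits.
 */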
static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd)
{
	memcpy(MLX5_ADDR_OF(tls_cmd, cmd, src_port), flow,
	       MLX5_BYTE_OFF(tls_flow, ipv6));

	MLX5_SET(tls_cmd, cmd, ipv6, MLX5_GET(tls_flow, flow, ipv6));
	MLX5_SET(tls_cmd, cmd, direction_sx,
		 MLX5_GET(tls_flow, flow, direction_sx));
}

int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
			    u32 seq, __be64 rcd_sn)
{
	struct mlx5_fpga_dma_buf *buf;
	int size = sizeof(*buf) + MLX5_TLS_COMMAND_SIZE;
	void *flow;
	void *cmd;
	int ret;

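	/* Allocate the DMA buf descriptor and the command payload in
	 * one atomic allocation; the payload follows the descriptor.
	 */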
	buf = kzalloc(size, GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	cmd = (buf + 1);

	rcu_read_lock();
	flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
	if (unlikely(!flow)) {
		rcu_read_unlock();
		WARN_ONCE(1, "Received NULL pointer for handle\n");
		kfree(buf);
		return -EINVAL;
	}
	mlx5_fpga_tls_flow_to_cmd(flow, cmd);
	rcu_read_unlock();

	MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
	MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
	MLX5_SET(tls_cmd, cmd, tcp_sn, seq);
	MLX5_SET(tls_cmd, cmd, command_type, CMD_RESYNC_RX);

	buf->sg[0].data = cmd;
	buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
	buf->complete = mlx_tls_kfree_complete;

	ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
	if (ret < 0)
		kfree(buf);

	return ret;
}

static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
					    void *flow, u32 swid, gfp_t flags)
{
	struct mlx5_fpga_tls_command_context *ctx;
	struct mlx5_fpga_dma_buf *buf;
	void *cmd;

	ctx = kzalloc(sizeof(*ctx) + MLX5_TLS_COMMAND_SIZE, flags);
	if (!ctx)
		return;

	buf = &ctx->buf;
	cmd = (ctx + 1);
	MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
	MLX5_SET(tls_cmd, cmd, swid, swid);

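	/* The flow is fully captured in the command payload below, so
	 * it can be freed before the teardown command completes.
	 */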
	mlx5_fpga_tls_flow_to_cmd(flow, cmd);
	kfree(flow);

	buf->sg[0].data = cmd;
	buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;

	mlx5_fpga_tls_cmd_send(mdev->fpga, ctx,
			       mlx5_fpga_tls_teardown_completion);
}

void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
			    gfp_t flags, bool direction_sx)
{
	struct mlx5_fpga_tls *tls = mdev->fpga->tls;
	void *flow;

	if (direction_sx)
		flow = mlx5_fpga_tls_release_swid(&tls->tx_idr,
						  &tls->tx_idr_spinlock,
						  swid);
	else
		flow = mlx5_fpga_tls_release_swid(&tls->rx_idr,
						  &tls->rx_idr_spinlock,
						  swid);

	if (!flow) {
		mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
			      swid);
		return;
	}

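	/* mlx5_fpga_tls_resync_rx() dereferences the flow under
	 * rcu_read_lock(); wait for those readers before the teardown
	 * path frees it.
	 */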
	synchronize_rcu(); /* before kfree(flow) */
	mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
}

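/* Lifecycle states of a setup-stream command; used to hand ownership
 * of the context between the submitter and the completion handler.
 */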
enum mlx5_fpga_setup_stream_status {
	MLX5_FPGA_CMD_PENDING,
	MLX5_FPGA_CMD_SEND_FAILED,
	MLX5_FPGA_CMD_RESPONSE_RECEIVED,
	MLX5_FPGA_CMD_ABANDONED,
};

struct mlx5_setup_stream_context {
	struct mlx5_fpga_tls_command_context cmd;
	atomic_t status;
	u32 syndrome;
	struct completion comp;
};

static void
mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn,
			       struct mlx5_fpga_device *fdev,
			       struct mlx5_fpga_tls_command_context *cmd,
			       struct mlx5_fpga_dma_buf *resp)
{
	struct mlx5_setup_stream_context *ctx =
	    container_of(cmd, struct mlx5_setup_stream_context, cmd);
	int status = MLX5_FPGA_CMD_SEND_FAILED;
	void *tls_cmd = ctx + 1;

	/* If we failed to send the command, resp == NULL */
	if (resp) {
		ctx->syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
		status = MLX5_FPGA_CMD_RESPONSE_RECEIVED;
	}

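	/* Pairs with the atomic_xchg_acquire() in
	 * mlx5_fpga_tls_setup_stream_cmd(): whichever side sees the
	 * other's state takes over releasing the context.
	 */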
	status = atomic_xchg_release(&ctx->status, status);
	if (likely(status != MLX5_FPGA_CMD_ABANDONED)) {
		complete(&ctx->comp);
		return;
	}

	mlx5_fpga_err(fdev, "Command was abandoned, syndrome = %u\n",
		      ctx->syndrome);

	if (!ctx->syndrome) {
		/* The process was killed while waiting for the context to be
		 * added, and the add completed successfully.
		 * We need to destroy the HW context, and we can't reuse
		 * the command context because we might not have received
		 * the tx completion yet.
		 */
		mlx5_fpga_tls_del_flow(fdev->mdev,
				       MLX5_GET(tls_cmd, tls_cmd, swid),
				       GFP_ATOMIC,
				       MLX5_GET(tls_cmd, tls_cmd,
						direction_sx));
	}

	mlx5_fpga_tls_put_command_ctx(cmd);
}

static int mlx5_fpga_tls_setup_stream_cmd(struct mlx5_core_dev *mdev,
					  struct mlx5_setup_stream_context *ctx)
{
	struct mlx5_fpga_dma_buf *buf;
	void *cmd = ctx + 1;
	int status, ret = 0;

	buf = &ctx->cmd.buf;
	buf->sg[0].data = cmd;
	buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
	MLX5_SET(tls_cmd, cmd, command_type, CMD_SETUP_STREAM);

	init_completion(&ctx->comp);
	atomic_set(&ctx->status, MLX5_FPGA_CMD_PENDING);
	ctx->syndrome = -1;

	mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
			       mlx5_fpga_tls_setup_completion);
	wait_for_completion_killable(&ctx->comp);

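	/* If the wait was killed while the command is still PENDING,
	 * mark it ABANDONED; the completion handler then owns cleanup.
	 */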
	status = atomic_xchg_acquire(&ctx->status, MLX5_FPGA_CMD_ABANDONED);
	if (unlikely(status == MLX5_FPGA_CMD_PENDING))
		/* ctx is going to be released in mlx5_fpga_tls_setup_completion */
		return -EINTR;

	if (unlikely(ctx->syndrome))
		ret = -ENOMEM;

	mlx5_fpga_tls_put_command_ctx(&ctx->cmd);
	return ret;
}

static void mlx5_fpga_tls_hw_qp_recv_cb(void *cb_arg,
					struct mlx5_fpga_dma_buf *buf)
{
	struct mlx5_fpga_device *fdev = (struct mlx5_fpga_device *)cb_arg;

	mlx5_fpga_tls_cmd_complete(fdev, buf);
}

bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev)
{
	if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
		return false;

	if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
	    MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
		return false;

	if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
	    MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS)
		return false;

	if (MLX5_CAP_FPGA(mdev, sandbox_product_version) != 0)
		return false;

	return true;
}

static int mlx5_fpga_tls_get_caps(struct mlx5_fpga_device *fdev,
				  u32 *p_caps)
{
	int err, cap_size = MLX5_ST_SZ_BYTES(tls_extended_cap);
	u32 caps = 0;
	void *buf;

	buf = kzalloc(cap_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	err = mlx5_fpga_get_sbu_caps(fdev, cap_size, buf);
	if (err)
		goto out;

	if (MLX5_GET(tls_extended_cap, buf, tx))
		caps |= MLX5_ACCEL_TLS_TX;
	if (MLX5_GET(tls_extended_cap, buf, rx))
		caps |= MLX5_ACCEL_TLS_RX;
	if (MLX5_GET(tls_extended_cap, buf, tls_v12))
		caps |= MLX5_ACCEL_TLS_V12;
	if (MLX5_GET(tls_extended_cap, buf, tls_v13))
		caps |= MLX5_ACCEL_TLS_V13;
	if (MLX5_GET(tls_extended_cap, buf, lro))
		caps |= MLX5_ACCEL_TLS_LRO;
	if (MLX5_GET(tls_extended_cap, buf, ipv6))
		caps |= MLX5_ACCEL_TLS_IPV6;

	if (MLX5_GET(tls_extended_cap, buf, aes_gcm_128))
		caps |= MLX5_ACCEL_TLS_AES_GCM128;
	if (MLX5_GET(tls_extended_cap, buf, aes_gcm_256))
		caps |= MLX5_ACCEL_TLS_AES_GCM256;

	*p_caps = caps;
	err = 0;
out:
	kfree(buf);
	return err;
}

int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;
	struct mlx5_fpga_conn_attr init_attr = {0};
	struct mlx5_fpga_conn *conn;
	struct mlx5_fpga_tls *tls;
	int err = 0;

	if (!mlx5_fpga_is_tls_device(mdev) || !fdev)
		return 0;

	tls = kzalloc(sizeof(*tls), GFP_KERNEL);
	if (!tls)
		return -ENOMEM;

	err = mlx5_fpga_tls_get_caps(fdev, &tls->caps);
	if (err)
		goto error;

	if (!(tls->caps & (MLX5_ACCEL_TLS_V12 | MLX5_ACCEL_TLS_AES_GCM128))) {
		err = -ENOTSUPP;
		goto error;
	}

	init_attr.rx_size = SBU_QP_QUEUE_SIZE;
	init_attr.tx_size = SBU_QP_QUEUE_SIZE;
	init_attr.recv_cb = mlx5_fpga_tls_hw_qp_recv_cb;
	init_attr.cb_arg = fdev;
	conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
	if (IS_ERR(conn)) {
		err = PTR_ERR(conn);
		mlx5_fpga_err(fdev, "Error creating TLS command connection %d\n",
			      err);
		goto error;
	}

	tls->conn = conn;
	spin_lock_init(&tls->pending_cmds_lock);
	INIT_LIST_HEAD(&tls->pending_cmds);

	idr_init(&tls->tx_idr);
	idr_init(&tls->rx_idr);
	spin_lock_init(&tls->tx_idr_spinlock);
	spin_lock_init(&tls->rx_idr_spinlock);
	fdev->tls = tls;
	return 0;

error:
	kfree(tls);
	return err;
}

void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;

	if (!fdev || !fdev->tls)
		return;

	mlx5_fpga_sbu_conn_destroy(fdev->tls->conn);
	kfree(fdev->tls);
	fdev->tls = NULL;
}

static void mlx5_fpga_tls_set_aes_gcm128_ctx(void *cmd,
					     struct tls_crypto_info *info,
					     __be64 *rcd_sn)
{
	struct tls12_crypto_info_aes_gcm_128 *crypto_info =
	    (struct tls12_crypto_info_aes_gcm_128 *)info;

	memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_rcd_sn), crypto_info->rec_seq,
	       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);

	memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_implicit_iv),
	       crypto_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key),
	       crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);

	/* in AES-GCM 128 we need to write the key twice */
	memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key) +
		   TLS_CIPHER_AES_GCM_128_KEY_SIZE,
	       crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);

	MLX5_SET(tls_cmd, cmd, alg, MLX5_TLS_ALG_AES_GCM_128);
}

static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps,
					  struct tls_crypto_info *crypto_info)
{
	__be64 rcd_sn;

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		if (!(caps & MLX5_ACCEL_TLS_AES_GCM128))
			return -EINVAL;
		mlx5_fpga_tls_set_aes_gcm128_ctx(cmd, crypto_info, &rcd_sn);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int _mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
				   struct tls_crypto_info *crypto_info,
				   u32 swid, u32 tcp_sn)
{
	u32 caps = mlx5_fpga_tls_device_caps(mdev);
	struct mlx5_setup_stream_context *ctx;
	int ret = -ENOMEM;
	size_t cmd_size;
	void *cmd;

	cmd_size = MLX5_TLS_COMMAND_SIZE + sizeof(*ctx);
	ctx = kzalloc(cmd_size, GFP_KERNEL);
	if (!ctx)
		goto out;

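	/* The command payload immediately follows the setup context in
	 * the single allocation above.
	 */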
	cmd = ctx + 1;
	ret = mlx5_fpga_tls_set_key_material(cmd, caps, crypto_info);
	if (ret)
		goto free_ctx;

	mlx5_fpga_tls_flow_to_cmd(flow, cmd);

	MLX5_SET(tls_cmd, cmd, swid, swid);
	MLX5_SET(tls_cmd, cmd, tcp_sn, tcp_sn);

	return mlx5_fpga_tls_setup_stream_cmd(mdev, ctx);

free_ctx:
	kfree(ctx);
out:
	return ret;
}

int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn, u32 *p_swid,
			   bool direction_sx)
{
	struct mlx5_fpga_tls *tls = mdev->fpga->tls;
	int ret = -ENOMEM;
	u32 swid;

	if (direction_sx)
		ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr,
					       &tls->tx_idr_spinlock, flow);
	else
		ret = mlx5_fpga_tls_alloc_swid(&tls->rx_idr,
					       &tls->rx_idr_spinlock, flow);

	if (ret < 0)
		return ret;

	swid = ret;
	MLX5_SET(tls_flow, flow, direction_sx, direction_sx ? 1 : 0);

	ret = _mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid,
				      start_offload_tcp_sn);
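	/* On -EINTR the abandoned command's completion handler takes
	 * over teardown of the HW context and the swid, so the swid
	 * must not be released here.
	 */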
	if (ret && ret != -EINTR)
		goto free_swid;

	*p_swid = swid;
	return 0;
free_swid:
	if (direction_sx)
		mlx5_fpga_tls_release_swid(&tls->tx_idr,
					   &tls->tx_idr_spinlock, swid);
	else
		mlx5_fpga_tls_release_swid(&tls->rx_idr,
					   &tls->rx_idr_spinlock, swid);

	return ret;
}