// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/tls.h>

#include "../ccm.h"
#include "../nfp_net.h"
#include "crypto.h"
#include "fw.h"

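/* CCM mailbox commands the firmware must support for TLS offload to be
 * usable, and the capability bits for the RX/TX kTLS crypto opcodes.
 */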
#define NFP_NET_TLS_CCM_MBOX_OPS_MASK		\
	(BIT(NFP_CCM_TYPE_CRYPTO_RESET) |	\
	 BIT(NFP_CCM_TYPE_CRYPTO_ADD) |		\
	 BIT(NFP_CCM_TYPE_CRYPTO_DEL) |		\
	 BIT(NFP_CCM_TYPE_CRYPTO_UPDATE))

#define NFP_NET_TLS_OPCODE_MASK_RX			\
	BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC)

#define NFP_NET_TLS_OPCODE_MASK_TX			\
	BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC)

#define NFP_NET_TLS_OPCODE_MASK						\
	(NFP_NET_TLS_OPCODE_MASK_RX | NFP_NET_TLS_OPCODE_MASK_TX)

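/* Set or clear the enable bit for @opcode in the crypto enable region
 * of the control BAR.  Opcode bits are packed 32 to a 4-byte word,
 * starting at crypto_enable_off.
 */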
static void nfp_net_crypto_set_op(struct nfp_net *nn, u8 opcode, bool on)
{
	u32 off, val;

	off = nn->tlv_caps.crypto_enable_off + round_down(opcode / 8, 4);

	val = nn_readl(nn, off);
	if (on)
		val |= BIT(opcode & 31);
	else
		val &= ~BIT(opcode & 31);
	nn_writel(nn, off, val);
}

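/* Adjust the connection count for one direction.  Must be called with
 * the ctrl BAR lock held.  Returns true on 0 <-> 1 transitions, i.e.
 * when the opcode enable bit was changed and a reconfig is needed.
 */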
static bool
__nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add,
			       enum tls_offload_ctx_dir direction)
{
	u8 opcode;
	int cnt;

	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
		opcode = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
		nn->ktls_tx_conn_cnt += add;
		cnt = nn->ktls_tx_conn_cnt;
		nn->dp.ktls_tx = !!nn->ktls_tx_conn_cnt;
	} else {
		opcode = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
		nn->ktls_rx_conn_cnt += add;
		cnt = nn->ktls_rx_conn_cnt;
	}

	/* Care only about 0 -> 1 and 1 -> 0 transitions */
	if (cnt > 1)
		return false;

	nfp_net_crypto_set_op(nn, opcode, cnt);
	return true;
}

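/* Update the connection count under the BAR lock and sync the opcode
 * enable bit with the device; the count is rolled back if the reconfig
 * fails.
 */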
static int
nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add,
			     enum tls_offload_ctx_dir direction)
{
	int ret = 0;

	/* Use the BAR lock to protect the connection counts */
	nn_ctrl_bar_lock(nn);
	if (__nfp_net_tls_conn_cnt_changed(nn, add, direction)) {
		ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO);
		/* Undo the cnt adjustment if failed */
		if (ret)
			__nfp_net_tls_conn_cnt_changed(nn, -add, direction);
	}
	nn_ctrl_bar_unlock(nn);

	return ret;
}

static int
nfp_net_tls_conn_add(struct nfp_net *nn, enum tls_offload_ctx_dir direction)
{
	return nfp_net_tls_conn_cnt_changed(nn, 1, direction);
}

static int
nfp_net_tls_conn_remove(struct nfp_net *nn, enum tls_offload_ctx_dir direction)
{
	return nfp_net_tls_conn_cnt_changed(nn, -1, direction);
}

static struct sk_buff *
nfp_net_tls_alloc_simple(struct nfp_net *nn, size_t req_sz, gfp_t flags)
{
	return nfp_ccm_mbox_msg_alloc(nn, req_sz,
				      sizeof(struct nfp_crypto_reply_simple),
				      flags);
}

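/* Send a crypto request which expects a simple reply carrying only an
 * error code, and translate the firmware error into an errno.  DEL
 * requests are flagged as critical (the last argument).
 */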
static int
nfp_net_tls_communicate_simple(struct nfp_net *nn, struct sk_buff *skb,
			       const char *name, enum nfp_ccm_type type)
{
	struct nfp_crypto_reply_simple *reply;
	int err;

	err = __nfp_ccm_mbox_communicate(nn, skb, type,
					 sizeof(*reply), sizeof(*reply),
					 type == NFP_CCM_TYPE_CRYPTO_DEL);
	if (err) {
		nn_dp_warn(&nn->dp, "failed to %s TLS: %d\n", name, err);
		return err;
	}

	reply = (void *)skb->data;
	err = -be32_to_cpu(reply->error);
	if (err)
		nn_dp_warn(&nn->dp, "failed to %s TLS, fw replied: %d\n",
			   name, err);
	dev_consume_skb_any(skb);

	return err;
}

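/* Ask the firmware to forget a connection handle.  Best effort - there
 * is no way to report failure to the caller, so errors are only logged.
 */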
static void nfp_net_tls_del_fw(struct nfp_net *nn, __be32 *fw_handle)
{
	struct nfp_crypto_req_del *req;
	struct sk_buff *skb;

	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
	if (!skb)
		return;

	req = (void *)skb->data;
	req->ep_id = 0;
	memcpy(req->handle, fw_handle, sizeof(req->handle));

	nfp_net_tls_communicate_simple(nn, skb, "delete",
				       NFP_CCM_TYPE_CRYPTO_DEL);
}

static void
nfp_net_tls_set_ipver_vlan(struct nfp_crypto_req_add_front *front, u8 ipver)
{
	front->ipver_vlan = cpu_to_be16(FIELD_PREP(NFP_NET_TLS_IPVER, ipver) |
					FIELD_PREP(NFP_NET_TLS_VLAN,
						   NFP_NET_TLS_VLAN_UNUSED));
}

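/* TX connections are not matched by address, so instead of the
 * addresses a unique connection id is stored in the key's address
 * bytes (the remaining address bytes are zeroed).
 */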
static void
nfp_net_tls_assign_conn_id(struct nfp_net *nn,
			   struct nfp_crypto_req_add_front *front)
{
	u32 len;
	u64 id;

	id = atomic64_inc_return(&nn->ktls_conn_id_gen);
	len = front->key_len - NFP_NET_TLS_NON_ADDR_KEY_LEN;

	memcpy(front->l3_addrs, &id, sizeof(id));
	memset(front->l3_addrs + sizeof(id), 0, len - sizeof(id));
}

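/* Fill the L3 portion of the connection key.  For RX the firmware
 * matches incoming packets, so the socket's destination address is the
 * packet's source address and vice versa.  TX uses a connection id
 * instead.
 */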
static struct nfp_crypto_req_add_back *
nfp_net_tls_set_ipv4(struct nfp_net *nn, struct nfp_crypto_req_add_v4 *req,
		     struct sock *sk, int direction)
{
	struct inet_sock *inet = inet_sk(sk);

	req->front.key_len += sizeof(__be32) * 2;

	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
		nfp_net_tls_assign_conn_id(nn, &req->front);
	} else {
		req->src_ip = inet->inet_daddr;
		req->dst_ip = inet->inet_saddr;
	}

	return &req->back;
}

static struct nfp_crypto_req_add_back *
nfp_net_tls_set_ipv6(struct nfp_net *nn, struct nfp_crypto_req_add_v6 *req,
		     struct sock *sk, int direction)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct ipv6_pinfo *np = inet6_sk(sk);

	req->front.key_len += sizeof(struct in6_addr) * 2;

	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
		nfp_net_tls_assign_conn_id(nn, &req->front);
	} else {
		memcpy(req->src_ip, &sk->sk_v6_daddr, sizeof(req->src_ip));
		memcpy(req->dst_ip, &np->saddr, sizeof(req->dst_ip));
	}

#endif
	return &req->back;
}

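/* Fill the L4 portion of the key.  Ports are swapped for RX for the
 * same reason as the addresses, and left zeroed for TX.
 */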
static void
nfp_net_tls_set_l4(struct nfp_crypto_req_add_front *front,
		   struct nfp_crypto_req_add_back *back, struct sock *sk,
		   int direction)
{
	struct inet_sock *inet = inet_sk(sk);

	front->l4_proto = IPPROTO_TCP;

	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
		back->src_port = 0;
		back->dst_port = 0;
	} else {
		back->src_port = inet->inet_dport;
		back->dst_port = inet->inet_sport;
	}
}

static u8 nfp_tls_1_2_dir_to_opcode(enum tls_offload_ctx_dir direction)
{
	switch (direction) {
	case TLS_OFFLOAD_CTX_DIR_TX:
		return NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
	case TLS_OFFLOAD_CTX_DIR_RX:
		return NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

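/* Check the firmware capability bits for the requested cipher and
 * direction.
 */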
static bool
nfp_net_cipher_supported(struct nfp_net *nn, u16 cipher_type,
			 enum tls_offload_ctx_dir direction)
{
	u8 bit;

	switch (cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		if (direction == TLS_OFFLOAD_CTX_DIR_TX)
			bit = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
		else
			bit = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
		break;
	default:
		return false;
	}

	return nn->tlv_caps.crypto_ops & BIT(bit);
}

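/* .tls_dev_add callback - install a new connection in the firmware.
 * Builds an ADD request keyed by connection id (TX) or address 4-tuple
 * (RX), sends it over the CCM mailbox, wipes the key material from the
 * skb afterwards, and stashes the returned firmware handle in the TLS
 * context's driver state.
 */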
static int
nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
		enum tls_offload_ctx_dir direction,
		struct tls_crypto_info *crypto_info,
		u32 start_offload_tcp_sn)
{
	struct tls12_crypto_info_aes_gcm_128 *tls_ci;
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_crypto_req_add_front *front;
	struct nfp_net_tls_offload_ctx *ntls;
	struct nfp_crypto_req_add_back *back;
	struct nfp_crypto_reply_add *reply;
	struct sk_buff *skb;
	size_t req_sz;
	void *req;
	bool ipv6;
	int err;

	BUILD_BUG_ON(sizeof(struct nfp_net_tls_offload_ctx) >
		     TLS_DRIVER_STATE_SIZE_TX);
	BUILD_BUG_ON(offsetof(struct nfp_net_tls_offload_ctx, rx_end) >
		     TLS_DRIVER_STATE_SIZE_RX);

	if (!nfp_net_cipher_supported(nn, crypto_info->cipher_type, direction))
		return -EOPNOTSUPP;

	switch (sk->sk_family) {
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		if (sk->sk_ipv6only ||
		    ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
			req_sz = sizeof(struct nfp_crypto_req_add_v6);
			ipv6 = true;
			break;
		}
#endif
		/* fall through */
	case AF_INET:
		req_sz = sizeof(struct nfp_crypto_req_add_v4);
		ipv6 = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	err = nfp_net_tls_conn_add(nn, direction);
	if (err)
		return err;

	skb = nfp_ccm_mbox_msg_alloc(nn, req_sz, sizeof(*reply), GFP_KERNEL);
	if (!skb) {
		err = -ENOMEM;
		goto err_conn_remove;
	}

	front = (void *)skb->data;
	front->ep_id = 0;
	front->key_len = NFP_NET_TLS_NON_ADDR_KEY_LEN;
	front->opcode = nfp_tls_1_2_dir_to_opcode(direction);
	memset(front->resv, 0, sizeof(front->resv));

	nfp_net_tls_set_ipver_vlan(front, ipv6 ? 6 : 4);

	req = (void *)skb->data;
	if (ipv6)
		back = nfp_net_tls_set_ipv6(nn, req, sk, direction);
	else
		back = nfp_net_tls_set_ipv4(nn, req, sk, direction);

	nfp_net_tls_set_l4(front, back, sk, direction);

	back->counter = 0;
	back->tcp_seq = cpu_to_be32(start_offload_tcp_sn);

	tls_ci = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
	memcpy(back->key, tls_ci->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	memset(&back->key[TLS_CIPHER_AES_GCM_128_KEY_SIZE / 4], 0,
	       sizeof(back->key) - TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	memcpy(back->iv, tls_ci->iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
	memcpy(&back->salt, tls_ci->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(back->rec_no, tls_ci->rec_seq, sizeof(tls_ci->rec_seq));

	/* Get an extra ref on the skb so we can wipe the key after */
	skb_get(skb);

	err = nfp_ccm_mbox_communicate(nn, skb, NFP_CCM_TYPE_CRYPTO_ADD,
				       sizeof(*reply), sizeof(*reply));
	reply = (void *)skb->data;

	/* We depend on CCM MBOX code not reallocating skb we sent
	 * so we can clear the key material out of the memory.
	 */
	if (!WARN_ON_ONCE((u8 *)back < skb->head ||
			  (u8 *)back > skb_end_pointer(skb)) &&
	    !WARN_ON_ONCE((u8 *)&reply[1] > (u8 *)back))
		memzero_explicit(back, sizeof(*back));
	dev_consume_skb_any(skb); /* the extra ref from skb_get() above */

	if (err) {
		nn_dp_warn(&nn->dp, "failed to add TLS: %d (%d)\n",
			   err, direction == TLS_OFFLOAD_CTX_DIR_TX);
		/* communicate frees skb on error */
		goto err_conn_remove;
	}

	err = -be32_to_cpu(reply->error);
	if (err) {
		if (err == -ENOSPC) {
			if (!atomic_fetch_inc(&nn->ktls_no_space))
				nn_info(nn, "HW TLS table full\n");
		} else {
			nn_dp_warn(&nn->dp,
				   "failed to add TLS, FW replied: %d\n", err);
		}
		goto err_free_skb;
	}

	if (!reply->handle[0] && !reply->handle[1]) {
		nn_dp_warn(&nn->dp, "FW returned NULL handle\n");
		err = -EINVAL;
		goto err_fw_remove;
	}

	ntls = tls_driver_ctx(sk, direction);
	memcpy(ntls->fw_handle, reply->handle, sizeof(ntls->fw_handle));
	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		ntls->next_seq = start_offload_tcp_sn;
	dev_consume_skb_any(skb);

	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		return 0;

	tls_offload_rx_resync_set_type(sk,
				       TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);
	return 0;

err_fw_remove:
	nfp_net_tls_del_fw(nn, reply->handle);
err_free_skb:
	dev_consume_skb_any(skb);
err_conn_remove:
	nfp_net_tls_conn_remove(nn, direction);
	return err;
}

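/* .tls_dev_del callback - drop the connection count (clearing the
 * opcode enable bit if this was the last connection) and ask the
 * firmware to forget the handle.
 */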
static void
nfp_net_tls_del(struct net_device *netdev, struct tls_context *tls_ctx,
		enum tls_offload_ctx_dir direction)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_net_tls_offload_ctx *ntls;

	nfp_net_tls_conn_remove(nn, direction);

	ntls = __tls_driver_ctx(tls_ctx, direction);
	nfp_net_tls_del_fw(nn, ntls->fw_handle);
}

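/* .tls_dev_resync callback - tell the firmware where the next record
 * starts.  TX resync may sleep and waits for the firmware's answer;
 * RX resync runs in atomic context, so the request is only posted.
 */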
static int
nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
		   u8 *rcd_sn, enum tls_offload_ctx_dir direction)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_net_tls_offload_ctx *ntls;
	struct nfp_crypto_req_update *req;
	struct sk_buff *skb;
	gfp_t flags;
	int err;

	flags = direction == TLS_OFFLOAD_CTX_DIR_TX ? GFP_KERNEL : GFP_ATOMIC;
	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), flags);
	if (!skb)
		return -ENOMEM;

	ntls = tls_driver_ctx(sk, direction);
	req = (void *)skb->data;
	req->ep_id = 0;
	req->opcode = nfp_tls_1_2_dir_to_opcode(direction);
	memset(req->resv, 0, sizeof(req->resv));
	memcpy(req->handle, ntls->fw_handle, sizeof(ntls->fw_handle));
	req->tcp_seq = cpu_to_be32(seq);
	memcpy(req->rec_no, rcd_sn, sizeof(req->rec_no));

	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
		err = nfp_net_tls_communicate_simple(nn, skb, "sync",
						     NFP_CCM_TYPE_CRYPTO_UPDATE);
		if (err)
			return err;
		ntls->next_seq = seq;
	} else {
		nfp_ccm_mbox_post(nn, skb, NFP_CCM_TYPE_CRYPTO_UPDATE,
				  sizeof(struct nfp_crypto_reply_simple));
	}

	return 0;
}

static const struct tlsdev_ops nfp_net_tls_ops = {
	.tls_dev_add = nfp_net_tls_add,
	.tls_dev_del = nfp_net_tls_del,
	.tls_dev_resync = nfp_net_tls_resync,
};

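/* Wipe all connection state on the device. */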
static int nfp_net_tls_reset(struct nfp_net *nn)
{
	struct nfp_crypto_req_reset *req;
	struct sk_buff *skb;

	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->ep_id = 0;

	return nfp_net_tls_communicate_simple(nn, skb, "reset",
					      NFP_CCM_TYPE_CRYPTO_RESET);
}

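/* Init-time setup: verify the firmware advertises both the crypto
 * opcodes and the mailbox commands needed, reset the device state,
 * start with all crypto ops disabled and advertise the kTLS features.
 */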
int nfp_net_tls_init(struct nfp_net *nn)
{
	struct net_device *netdev = nn->dp.netdev;
	int err;

	if (!(nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK))
		return 0;

	if ((nn->tlv_caps.mbox_cmsg_types & NFP_NET_TLS_CCM_MBOX_OPS_MASK) !=
	    NFP_NET_TLS_CCM_MBOX_OPS_MASK)
		return 0;

	if (!nfp_ccm_mbox_fits(nn, sizeof(struct nfp_crypto_req_add_v6))) {
		nn_warn(nn, "disabling TLS offload - mbox too small: %d\n",
			nn->tlv_caps.mbox_len);
		return 0;
	}

	err = nfp_net_tls_reset(nn);
	if (err)
		return err;

	nn_ctrl_bar_lock(nn);
	nn_writel(nn, nn->tlv_caps.crypto_enable_off, 0);
	err = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO);
	nn_ctrl_bar_unlock(nn);
	if (err)
		return err;

	if (nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK_RX) {
		netdev->hw_features |= NETIF_F_HW_TLS_RX;
		netdev->features |= NETIF_F_HW_TLS_RX;
	}
	if (nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK_TX) {
		netdev->hw_features |= NETIF_F_HW_TLS_TX;
		netdev->features |= NETIF_F_HW_TLS_TX;
	}

	netdev->tlsdev_ops = &nfp_net_tls_ops;

	return 0;
}