// SPDX-License-Identifier: GPL-2.0
/* Marvell MACSEC hardware offload driver
 *
 * Copyright (C) 2022 Marvell.
 */

#include <linux/rtnetlink.h>
#include <linux/bitfield.h>
#include <net/macsec.h>
#include "otx2_common.h"

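/* Layout of the TCAM data words used for flow matching (derived from
 * the FIELD_PREP() usage below): word0 bits[63:48] carry the lower 16
 * bits of the MAC SA, word1 bits[31:0] the upper 32 bits, and word1
 * bits[47:32] the EtherType.
 */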
#define MCS_TCAM0_MAC_SA_MASK		GENMASK_ULL(63, 48)
#define MCS_TCAM1_MAC_SA_MASK		GENMASK_ULL(31, 0)
#define MCS_TCAM1_ETYPE_MASK		GENMASK_ULL(47, 32)

#define MCS_SA_MAP_MEM_SA_USE		BIT_ULL(9)

#define MCS_RX_SECY_PLCY_RW_MASK	GENMASK_ULL(49, 18)
#define MCS_RX_SECY_PLCY_RP		BIT_ULL(17)
#define MCS_RX_SECY_PLCY_AUTH_ENA	BIT_ULL(16)
#define MCS_RX_SECY_PLCY_CIP		GENMASK_ULL(8, 5)
#define MCS_RX_SECY_PLCY_VAL		GENMASK_ULL(2, 1)
#define MCS_RX_SECY_PLCY_ENA		BIT_ULL(0)

#define MCS_TX_SECY_PLCY_MTU		GENMASK_ULL(43, 28)
#define MCS_TX_SECY_PLCY_ST_TCI		GENMASK_ULL(27, 22)
#define MCS_TX_SECY_PLCY_ST_OFFSET	GENMASK_ULL(21, 15)
#define MCS_TX_SECY_PLCY_INS_MODE	BIT_ULL(14)
#define MCS_TX_SECY_PLCY_AUTH_ENA	BIT_ULL(13)
#define MCS_TX_SECY_PLCY_CIP		GENMASK_ULL(5, 2)
#define MCS_TX_SECY_PLCY_PROTECT	BIT_ULL(1)
#define MCS_TX_SECY_PLCY_ENA		BIT_ULL(0)

#define MCS_GCM_AES_128			0
#define MCS_GCM_AES_256			1
#define MCS_GCM_AES_XPN_128		2
#define MCS_GCM_AES_XPN_256		3

#define MCS_TCI_ES			0x40 /* end station */
#define MCS_TCI_SC			0x20 /* SCI present */
#define MCS_TCI_SCB			0x10 /* EPON */
#define MCS_TCI_E			0x08 /* encryption */
#define MCS_TCI_C			0x04 /* changed text */

static struct cn10k_mcs_txsc *cn10k_mcs_get_txsc(struct cn10k_mcs_cfg *cfg,
						 struct macsec_secy *secy)
{
	struct cn10k_mcs_txsc *txsc;

	list_for_each_entry(txsc, &cfg->txsc_list, entry) {
		if (txsc->sw_secy == secy)
			return txsc;
	}

	return NULL;
}

static struct cn10k_mcs_rxsc *cn10k_mcs_get_rxsc(struct cn10k_mcs_cfg *cfg,
						 struct macsec_secy *secy,
						 struct macsec_rx_sc *rx_sc)
{
	struct cn10k_mcs_rxsc *rxsc;

	list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
		if (rxsc->sw_rxsc == rx_sc && rxsc->sw_secy == secy)
			return rxsc;
	}

	return NULL;
}

static const char *rsrc_name(enum mcs_rsrc_type rsrc_type)
{
	switch (rsrc_type) {
	case MCS_RSRC_TYPE_FLOWID:
		return "FLOW";
	case MCS_RSRC_TYPE_SC:
		return "SC";
	case MCS_RSRC_TYPE_SECY:
		return "SECY";
	case MCS_RSRC_TYPE_SA:
		return "SA";
	default:
		return "Unknown";
	}
}

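/* Request a single hardware resource of @type in direction @dir from
 * the AF over the mailbox and return its id via @rsrc_id.
 */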
static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
				enum mcs_rsrc_type type, u16 *rsrc_id)
{
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_alloc_rsrc_req *req;
	struct mcs_alloc_rsrc_rsp *rsp;
	int ret = -ENOMEM;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_alloc_resources(mbox);
	if (!req)
		goto fail;

	req->rsrc_type = type;
	req->rsrc_cnt  = 1;
	req->dir = dir;

	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_alloc_rsrc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
							     0, &req->hdr);
	if (IS_ERR(rsp) || req->rsrc_cnt != rsp->rsrc_cnt ||
	    req->rsrc_type != rsp->rsrc_type || req->dir != rsp->dir) {
		ret = -EINVAL;
		goto fail;
	}

	switch (rsp->rsrc_type) {
	case MCS_RSRC_TYPE_FLOWID:
		*rsrc_id = rsp->flow_ids[0];
		break;
	case MCS_RSRC_TYPE_SC:
		*rsrc_id = rsp->sc_ids[0];
		break;
	case MCS_RSRC_TYPE_SECY:
		*rsrc_id = rsp->secy_ids[0];
		break;
	case MCS_RSRC_TYPE_SA:
		*rsrc_id = rsp->sa_ids[0];
		break;
	default:
		ret = -EINVAL;
		goto fail;
	}

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	dev_err(pfvf->dev, "Failed to allocate %s %s resource\n",
		dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
	mutex_unlock(&mbox->lock);
	return ret;
}

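/* Return a hardware resource to the AF. When @all is set, the AF is
 * asked to free every resource of @type owned by this PF/VF (used at
 * teardown, see cn10k_mcs_free()).
 */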
static void cn10k_mcs_free_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
				enum mcs_rsrc_type type, u16 hw_rsrc_id,
				bool all)
{
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_free_rsrc_req *req;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_free_resources(mbox);
	if (!req)
		goto fail;

	req->rsrc_id = hw_rsrc_id;
	req->rsrc_type = type;
	req->dir = dir;
	if (all)
		req->all = 1;

	if (otx2_sync_mbox_msg(&pfvf->mbox))
		goto fail;

	mutex_unlock(&mbox->lock);

	return;
fail:
	dev_err(pfvf->dev, "Failed to free %s %s resource\n",
		dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
	mutex_unlock(&mbox->lock);
}

static int cn10k_mcs_alloc_txsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
{
	return cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id);
}

static int cn10k_mcs_alloc_rxsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
{
	return cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id);
}

static void cn10k_mcs_free_txsa(struct otx2_nic *pfvf, u16 hw_sa_id)
{
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
}

static void cn10k_mcs_free_rxsa(struct otx2_nic *pfvf, u16 hw_sa_id)
{
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
}

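/* Program the RX secy policy: replay window/protection, the cipher
 * (AES-GCM-128 until XPN support is added) and the frame validation
 * mode taken from the software secy.
 */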
static int cn10k_mcs_write_rx_secy(struct otx2_nic *pfvf,
				   struct macsec_secy *secy, u8 hw_secy_id)
{
	struct mcs_secy_plcy_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	u64 policy;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	policy = FIELD_PREP(MCS_RX_SECY_PLCY_RW_MASK, secy->replay_window);
	if (secy->replay_protect)
		policy |= MCS_RX_SECY_PLCY_RP;

	policy |= MCS_RX_SECY_PLCY_AUTH_ENA;
	policy |= FIELD_PREP(MCS_RX_SECY_PLCY_CIP, MCS_GCM_AES_128);
	policy |= FIELD_PREP(MCS_RX_SECY_PLCY_VAL, secy->validate_frames);

	policy |= MCS_RX_SECY_PLCY_ENA;

	req->plcy = policy;
	req->secy_id = hw_secy_id;
	req->dir = MCS_RX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

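/* Install a TCAM rule matching the MACsec EtherType (0x88e5) so that
 * tagged frames are steered to this secy/SC pair; all other TCAM
 * fields are masked out (don't care).
 */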
static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
				     struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
{
	struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
	struct mcs_flowid_entry_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC);
	req->mask[1] = ~0ULL;
	req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK;

	req->mask[0] = ~0ULL;
	req->mask[2] = ~0ULL;
	req->mask[3] = ~0ULL;

	req->flow_id = rxsc->hw_flow_id;
	req->secy_id = hw_secy_id;
	req->sc_id = rxsc->hw_sc_id;
	req->dir = MCS_RX;

	if (sw_rx_sc->active)
		req->ena = 1;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

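/* Program the SC CAM entry that maps this Rx SC's SCI to its hardware
 * SC index.
 */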
static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf,
				  struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
{
	struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
	struct mcs_rx_sc_cam_write_req *sc_req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	sc_req = otx2_mbox_alloc_msg_mcs_rx_sc_cam_write(mbox);
	if (!sc_req) {
		ret = -ENOMEM;
		goto fail;
	}

	sc_req->sci = (__force u64)cpu_to_be64((__force u64)sw_rx_sc->sci);
	sc_req->sc_id = rxsc->hw_sc_id;
	sc_req->secy_id = hw_secy_id;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

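/* Write the SA key into the SA policy table and map the SA to its
 * SC/AN; both requests are batched into a single mailbox send.
 */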
static int cn10k_mcs_write_rx_sa_plcy(struct otx2_nic *pfvf,
				      struct macsec_secy *secy,
				      struct cn10k_mcs_rxsc *rxsc,
				      u8 assoc_num, bool sa_in_use)
{
	unsigned char *src = rxsc->sa_key[assoc_num];
	struct mcs_sa_plcy_write_req *plcy_req;
	struct mcs_rx_sc_sa_map *map_req;
	struct mbox *mbox = &pfvf->mbox;
	u8 reg, key_len;
	int ret;

	mutex_lock(&mbox->lock);

	plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
	if (!plcy_req) {
		ret = -ENOMEM;
		goto fail;
	}

	map_req = otx2_mbox_alloc_msg_mcs_rx_sc_sa_map_write(mbox);
	if (!map_req) {
		otx2_mbox_reset(&mbox->mbox, 0);
		ret = -ENOMEM;
		goto fail;
	}

	for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) {
		memcpy((u8 *)&plcy_req->plcy[0][reg],
		       (src + reg * 8), 8);
		reg++;
	}

	plcy_req->sa_index[0] = rxsc->hw_sa_id[assoc_num];
	plcy_req->sa_cnt = 1;
	plcy_req->dir = MCS_RX;

	map_req->sa_index = rxsc->hw_sa_id[assoc_num];
	map_req->sa_in_use = sa_in_use;
	map_req->sc_id = rxsc->hw_sc_id;
	map_req->an = assoc_num;

	/* Send two messages together */
	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_rx_sa_pn(struct otx2_nic *pfvf,
				    struct cn10k_mcs_rxsc *rxsc,
				    u8 assoc_num, u64 next_pn)
{
	struct mcs_pn_table_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->pn_id = rxsc->hw_sa_id[assoc_num];
	req->next_pn = next_pn;
	req->dir = MCS_RX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

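/* Build and program the TX secy policy: SecTag TCI bits, tag offset,
 * MTU, cipher and the protect/enable flags derived from the software
 * secy state.
 */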
static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   struct cn10k_mcs_txsc *txsc)
{
	struct mcs_secy_plcy_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	struct macsec_tx_sc *sw_tx_sc;
	/* Insert SecTag after 12 bytes (DA + SA) */
	u8 tag_offset = 12;
	u8 sectag_tci = 0;
	u64 policy;
	int ret;

	sw_tx_sc = &secy->tx_sc;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	if (sw_tx_sc->send_sci) {
		sectag_tci |= MCS_TCI_SC;
	} else {
		if (sw_tx_sc->end_station)
			sectag_tci |= MCS_TCI_ES;
		if (sw_tx_sc->scb)
			sectag_tci |= MCS_TCI_SCB;
	}

	if (sw_tx_sc->encrypt)
		sectag_tci |= (MCS_TCI_E | MCS_TCI_C);

	policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu);
	/* Write SecTag excluding the AN bits (1..0) */
	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);
	policy |= MCS_TX_SECY_PLCY_INS_MODE;
	policy |= MCS_TX_SECY_PLCY_AUTH_ENA;
	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_CIP, MCS_GCM_AES_128);

	if (secy->protect_frames)
		policy |= MCS_TX_SECY_PLCY_PROTECT;

	/* If the encoding SA does not exist or is not active and
	 * protect_frames is not set, frames can be sent out as-is.
	 * Hence enable the policy irrespective of the secy being
	 * operational when !protect_frames.
	 */
	if (!secy->protect_frames || secy->operational)
		policy |= MCS_TX_SECY_PLCY_ENA;

	req->plcy = policy;
	req->secy_id = txsc->hw_secy_id_tx;
	req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_tx_flowid(struct otx2_nic *pfvf,
				     struct macsec_secy *secy,
				     struct cn10k_mcs_txsc *txsc)
{
	struct mcs_flowid_entry_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	u64 mac_sa;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	mac_sa = ether_addr_to_u64(secy->netdev->dev_addr);

	req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_SA_MASK, mac_sa);
	req->data[1] = FIELD_PREP(MCS_TCAM1_MAC_SA_MASK, mac_sa >> 16);

	req->mask[0] = ~0ULL;
	req->mask[0] &= ~MCS_TCAM0_MAC_SA_MASK;

	req->mask[1] = ~0ULL;
	req->mask[1] &= ~MCS_TCAM1_MAC_SA_MASK;

	req->mask[2] = ~0ULL;
	req->mask[3] = ~0ULL;

	req->flow_id = txsc->hw_flow_id;
	req->secy_id = txsc->hw_secy_id_tx;
	req->sc_id = txsc->hw_sc_id;
	req->sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
	req->dir = MCS_TX;
	/* This can be enabled since stack xmits packets only when interface is up */
	req->ena = 1;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

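/* Map the hardware SC to its current transmit SA; only the secy's
 * encoding_sa is linked, requests for other SAs are a no-op.
 */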
static int cn10k_mcs_link_tx_sa2sc(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   struct cn10k_mcs_txsc *txsc,
				   u8 sa_num, bool sa_active)
{
	struct mcs_tx_sc_sa_map *map_req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	/* Of all the SAs, only the encoding_sa is linked to the SC */
	if (txsc->encoding_sa != sa_num)
		return 0;

	mutex_lock(&mbox->lock);

	map_req = otx2_mbox_alloc_msg_mcs_tx_sc_sa_map_write(mbox);
	if (!map_req) {
		otx2_mbox_reset(&mbox->mbox, 0);
		ret = -ENOMEM;
		goto fail;
	}

	map_req->sa_index0 = txsc->hw_sa_id[sa_num];
	map_req->sa_index0_vld = sa_active;
	map_req->sectag_sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
	map_req->sc_id = txsc->hw_sc_id;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_tx_sa_plcy(struct otx2_nic *pfvf,
				      struct macsec_secy *secy,
				      struct cn10k_mcs_txsc *txsc,
				      u8 assoc_num)
{
	unsigned char *src = txsc->sa_key[assoc_num];
	struct mcs_sa_plcy_write_req *plcy_req;
	struct mbox *mbox = &pfvf->mbox;
	u8 reg, key_len;
	int ret;

	mutex_lock(&mbox->lock);

	plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
	if (!plcy_req) {
		ret = -ENOMEM;
		goto fail;
	}

	for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) {
		memcpy((u8 *)&plcy_req->plcy[0][reg], (src + reg * 8), 8);
		reg++;
	}

	plcy_req->plcy[0][8] = assoc_num;
	plcy_req->sa_index[0] = txsc->hw_sa_id[assoc_num];
	plcy_req->sa_cnt = 1;
	plcy_req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_write_tx_sa_pn(struct otx2_nic *pfvf,
				struct cn10k_mcs_txsc *txsc,
				u8 assoc_num, u64 next_pn)
{
	struct mcs_pn_table_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->pn_id = txsc->hw_sa_id[assoc_num];
	req->next_pn = next_pn;
	req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

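/* Enable or disable a TCAM flow entry; disabling stops hardware MACsec
 * processing for the flow without freeing the entry.
 */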
static int cn10k_mcs_ena_dis_flowid(struct otx2_nic *pfvf, u16 hw_flow_id,
				    bool enable, enum mcs_direction dir)
{
	struct mcs_flowid_ena_dis_entry *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_ena_entry(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->flow_id = hw_flow_id;
	req->ena = enable;
	req->dir = dir;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_sa_stats(struct otx2_nic *pfvf, u8 hw_sa_id,
			      struct mcs_sa_stats *rsp_p,
			      enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_stats_req *req;
	struct mcs_sa_stats *rsp;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_sa_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_sa_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_sa_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SA;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_sa_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
						       0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_sc_stats(struct otx2_nic *pfvf, u8 hw_sc_id,
			      struct mcs_sc_stats *rsp_p,
			      enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_stats_req *req;
	struct mcs_sc_stats *rsp;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_sc_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_sc_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_sc_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SC;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_sc_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
						       0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_secy_stats(struct otx2_nic *pfvf, u8 hw_secy_id,
				struct mcs_secy_stats *rsp_p,
				enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_secy_stats *rsp;
	struct mcs_stats_req *req;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_secy_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_secy_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_secy_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SECY;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_secy_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
							 0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static struct cn10k_mcs_txsc *cn10k_mcs_create_txsc(struct otx2_nic *pfvf)
{
	struct cn10k_mcs_txsc *txsc;
	int ret;

	txsc = kzalloc(sizeof(*txsc), GFP_KERNEL);
	if (!txsc)
		return ERR_PTR(-ENOMEM);

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
				   &txsc->hw_flow_id);
	if (ret)
		goto fail;

	/* For a SecY, one TX secy and one RX secy HW resources are needed */
	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
				   &txsc->hw_secy_id_tx);
	if (ret)
		goto free_flowid;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
				   &txsc->hw_secy_id_rx);
	if (ret)
		goto free_tx_secy;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
				   &txsc->hw_sc_id);
	if (ret)
		goto free_rx_secy;

	return txsc;
free_rx_secy:
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_rx, false);
free_tx_secy:
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_tx, false);
free_flowid:
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
			    txsc->hw_flow_id, false);
fail:
	kfree(txsc);
	return ERR_PTR(ret);
}

/* Free the Tx SC and its SAs (if any) back to the AF */
static void cn10k_mcs_delete_txsc(struct otx2_nic *pfvf,
				  struct cn10k_mcs_txsc *txsc)
{
	u8 sa_bmap = txsc->sa_bmap;
	u8 sa_num = 0;

	while (sa_bmap) {
		if (sa_bmap & 1) {
			cn10k_mcs_write_tx_sa_plcy(pfvf, txsc->sw_secy,
						   txsc, sa_num);
			cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
		}
		sa_num++;
		sa_bmap >>= 1;
	}

	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
			    txsc->hw_sc_id, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_rx, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_tx, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
			    txsc->hw_flow_id, false);
}

static struct cn10k_mcs_rxsc *cn10k_mcs_create_rxsc(struct otx2_nic *pfvf)
{
	struct cn10k_mcs_rxsc *rxsc;
	int ret;

	rxsc = kzalloc(sizeof(*rxsc), GFP_KERNEL);
	if (!rxsc)
		return ERR_PTR(-ENOMEM);

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
				   &rxsc->hw_flow_id);
	if (ret)
		goto fail;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
				   &rxsc->hw_sc_id);
	if (ret)
		goto free_flowid;

	return rxsc;
free_flowid:
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
			    rxsc->hw_flow_id, false);
fail:
	kfree(rxsc);
	return ERR_PTR(ret);
}

/* Free the Rx SC and its SAs (if any) back to the AF */
static void cn10k_mcs_delete_rxsc(struct otx2_nic *pfvf,
				  struct cn10k_mcs_rxsc *rxsc)
{
	u8 sa_bmap = rxsc->sa_bmap;
	u8 sa_num = 0;

	while (sa_bmap) {
		if (sa_bmap & 1) {
			cn10k_mcs_write_rx_sa_plcy(pfvf, rxsc->sw_secy, rxsc,
						   sa_num, false);
			cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);
		}
		sa_num++;
		sa_bmap >>= 1;
	}

	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
			    rxsc->hw_sc_id, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
			    rxsc->hw_flow_id, false);
}

static int cn10k_mcs_secy_tx_cfg(struct otx2_nic *pfvf, struct macsec_secy *secy,
				 struct cn10k_mcs_txsc *txsc,
				 struct macsec_tx_sa *sw_tx_sa, u8 sa_num)
{
	if (sw_tx_sa) {
		cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
		cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
				     sw_tx_sa->next_pn_halves.lower);
		cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num,
					sw_tx_sa->active);
	}

	cn10k_mcs_write_tx_secy(pfvf, secy, txsc);
	cn10k_mcs_write_tx_flowid(pfvf, secy, txsc);
	/* When updating the secy, update the RX secy as well */
	cn10k_mcs_write_rx_secy(pfvf, secy, txsc->hw_secy_id_rx);

	return 0;
}

static int cn10k_mcs_secy_rx_cfg(struct otx2_nic *pfvf,
				 struct macsec_secy *secy, u8 hw_secy_id)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *mcs_rx_sc;
	struct macsec_rx_sc *sw_rx_sc;
	struct macsec_rx_sa *sw_rx_sa;
	u8 sa_num;

	for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
	     sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
		mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
		if (unlikely(!mcs_rx_sc))
			continue;

		for (sa_num = 0; sa_num < CN10K_MCS_SA_PER_SC; sa_num++) {
			sw_rx_sa = rcu_dereference_bh(sw_rx_sc->sa[sa_num]);
			if (!sw_rx_sa)
				continue;

			cn10k_mcs_write_rx_sa_plcy(pfvf, secy, mcs_rx_sc,
						   sa_num, sw_rx_sa->active);
			cn10k_mcs_write_rx_sa_pn(pfvf, mcs_rx_sc, sa_num,
						 sw_rx_sa->next_pn_halves.lower);
		}

		cn10k_mcs_write_rx_flowid(pfvf, mcs_rx_sc, hw_secy_id);
		cn10k_mcs_write_sc_cam(pfvf, mcs_rx_sc, hw_secy_id);
	}

	return 0;
}

static int cn10k_mcs_disable_rxscs(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   bool delete)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *mcs_rx_sc;
	struct macsec_rx_sc *sw_rx_sc;
	int ret;

	for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
	     sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
		mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
		if (unlikely(!mcs_rx_sc))
			continue;

		ret = cn10k_mcs_ena_dis_flowid(pfvf, mcs_rx_sc->hw_flow_id,
					       false, MCS_RX);
		if (ret)
			dev_err(pfvf->dev, "Failed to disable TCAM for SC %d\n",
				mcs_rx_sc->hw_sc_id);
		if (delete) {
			cn10k_mcs_delete_rxsc(pfvf, mcs_rx_sc);
			list_del(&mcs_rx_sc->entry);
			kfree(mcs_rx_sc);
		}
	}

	return 0;
}

static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy,
				 struct cn10k_mcs_txsc *txsc)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_secy_stats rx_rsp = { 0 };
	struct mcs_sc_stats sc_rsp = { 0 };
	struct cn10k_mcs_rxsc *rxsc;

	/* Because some stats share counters in the hardware, take a
	 * snapshot of the current stats and reset them whenever the secy
	 * policy is updated. The stats below are the ones affected by
	 * the shared counters.
	 */

	/* Check if sync is really needed */
	if (secy->validate_frames == txsc->last_validate_frames &&
	    secy->protect_frames == txsc->last_protect_frames)
		return;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);

	txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
	txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
	txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
	if (txsc->last_validate_frames == MACSEC_VALIDATE_STRICT)
		txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
	else
		txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;

	list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
		cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &sc_rsp, MCS_RX, true);

		rxsc->stats.InOctetsValidated += sc_rsp.octet_validate_cnt;
		rxsc->stats.InOctetsDecrypted += sc_rsp.octet_decrypt_cnt;

		rxsc->stats.InPktsInvalid += sc_rsp.pkt_invalid_cnt;
		rxsc->stats.InPktsNotValid += sc_rsp.pkt_notvalid_cnt;

		if (txsc->last_protect_frames)
			rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt;
		else
			rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt;

		if (txsc->last_validate_frames == MACSEC_VALIDATE_CHECK)
			rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt;
		else
			rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt;
	}

	txsc->last_validate_frames = secy->validate_frames;
	txsc->last_protect_frames = secy->protect_frames;
}

static int cn10k_mdo_open(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct macsec_tx_sa *sw_tx_sa;
	struct cn10k_mcs_txsc *txsc;
	u8 sa_num;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	sa_num = txsc->encoding_sa;
	sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);

	err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
	if (err)
		return err;

	return cn10k_mcs_secy_rx_cfg(pfvf, secy, txsc->hw_secy_id_rx);
}

static int cn10k_mdo_stop(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	err = cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
	if (err)
		return err;

	return cn10k_mcs_disable_rxscs(pfvf, ctx->secy, false);
}

static int cn10k_mdo_add_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_txsc *txsc;

	if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
		return -EOPNOTSUPP;

	/* Stick to 16 bytes key len until XPN support is added */
	if (secy->key_len != 16)
		return -EOPNOTSUPP;

	if (secy->xpn)
		return -EOPNOTSUPP;

	txsc = cn10k_mcs_create_txsc(pfvf);
	if (IS_ERR(txsc))
		return -ENOSPC;

	txsc->sw_secy = secy;
	txsc->encoding_sa = secy->tx_sc.encoding_sa;
	txsc->last_validate_frames = secy->validate_frames;
	txsc->last_protect_frames = secy->protect_frames;

	list_add(&txsc->entry, &cfg->txsc_list);

	if (netif_running(secy->netdev))
		return cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);

	return 0;
}

static int cn10k_mdo_upd_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct macsec_tx_sa *sw_tx_sa;
	struct cn10k_mcs_txsc *txsc;
	u8 sa_num;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	txsc->encoding_sa = secy->tx_sc.encoding_sa;

	sa_num = txsc->encoding_sa;
	sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);

	if (netif_running(secy->netdev)) {
		cn10k_mcs_sync_stats(pfvf, secy, txsc);

		err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
	cn10k_mcs_disable_rxscs(pfvf, ctx->secy, true);
	cn10k_mcs_delete_txsc(pfvf, txsc);
	list_del(&txsc->entry);
	kfree(txsc);

	return 0;
}

static int cn10k_mdo_add_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (cn10k_mcs_alloc_txsa(pfvf, &txsc->hw_sa_id[sa_num]))
		return -ENOSPC;

	memcpy(&txsc->sa_key[sa_num], ctx->sa.key, secy->key_len);
	txsc->sa_bmap |= 1 << sa_num;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
		if (err)
			return err;

		err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
					   sw_tx_sa->next_pn_halves.lower);
		if (err)
			return err;

		err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
					      sa_num, sw_tx_sa->active);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (netif_running(secy->netdev)) {
		/* Keys cannot be changed after creation */
		err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
					   sw_tx_sa->next_pn_halves.lower);
		if (err)
			return err;

		err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
					      sa_num, sw_tx_sa->active);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
	txsc->sa_bmap &= ~(1 << sa_num);

	return 0;
}

static int cn10k_mdo_add_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_rxsc *rxsc;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	rxsc = cn10k_mcs_create_rxsc(pfvf);
	if (IS_ERR(rxsc))
		return -ENOSPC;

	rxsc->sw_secy = ctx->secy;
	rxsc->sw_rxsc = ctx->rx_sc;
	list_add(&rxsc->entry, &cfg->rxsc_list);

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_flowid(pfvf, rxsc, txsc->hw_secy_id_rx);
		if (err)
			return err;

		err = cn10k_mcs_write_sc_cam(pfvf, rxsc, txsc->hw_secy_id_rx);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	bool enable = ctx->rx_sc->active;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (netif_running(secy->netdev))
		return cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id,
						enable, MCS_RX);

	return 0;
}

static int cn10k_mdo_del_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id, false, MCS_RX);
	cn10k_mcs_delete_rxsc(pfvf, rxsc);
	list_del(&rxsc->entry);
	kfree(rxsc);

	return 0;
}

static int cn10k_mdo_add_rxsa(struct macsec_context *ctx)
{
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
	u64 next_pn = rx_sa->next_pn_halves.lower;
	struct macsec_secy *secy = ctx->secy;
	bool sa_in_use = rx_sa->active;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;
	int err;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (cn10k_mcs_alloc_rxsa(pfvf, &rxsc->hw_sa_id[sa_num]))
		return -ENOSPC;

	memcpy(&rxsc->sa_key[sa_num], ctx->sa.key, ctx->secy->key_len);
	rxsc->sa_bmap |= 1 << sa_num;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc,
						 sa_num, sa_in_use);
		if (err)
			return err;

		err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, next_pn);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx)
{
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
	u64 next_pn = rx_sa->next_pn_halves.lower;
	struct macsec_secy *secy = ctx->secy;
	bool sa_in_use = rx_sa->active;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;
	int err;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc, sa_num, sa_in_use);
		if (err)
			return err;

		err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, next_pn);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_rxsa(struct macsec_context *ctx)
{
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_write_rx_sa_plcy(pfvf, ctx->secy, rxsc, sa_num, false);
	cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);

	rxsc->sa_bmap &= ~(1 << sa_num);

	return 0;
}

static int cn10k_mdo_get_dev_stats(struct macsec_context *ctx)
{
	struct mcs_secy_stats tx_rsp = { 0 }, rx_rsp = { 0 };
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_tx, &tx_rsp, MCS_TX, false);
	ctx->stats.dev_stats->OutPktsUntagged = tx_rsp.pkt_untagged_cnt;
	ctx->stats.dev_stats->OutPktsTooLong = tx_rsp.pkt_toolong_cnt;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
	txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
	txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
	txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
	if (secy->validate_frames == MACSEC_VALIDATE_STRICT)
		txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
	else
		txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;
	txsc->stats.InPktsOverrun = 0;

	ctx->stats.dev_stats->InPktsNoTag = txsc->stats.InPktsNoTag;
	ctx->stats.dev_stats->InPktsUntagged = txsc->stats.InPktsUntagged;
	ctx->stats.dev_stats->InPktsBadTag = txsc->stats.InPktsBadTag;
	ctx->stats.dev_stats->InPktsUnknownSCI = txsc->stats.InPktsUnknownSCI;
	ctx->stats.dev_stats->InPktsNoSCI = txsc->stats.InPktsNoSCI;
	ctx->stats.dev_stats->InPktsOverrun = txsc->stats.InPktsOverrun;

	return 0;
}

static int cn10k_mdo_get_tx_sc_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sc_stats rsp = { 0 };
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_sc_stats(pfvf, txsc->hw_sc_id, &rsp, MCS_TX, false);

	ctx->stats.tx_sc_stats->OutPktsProtected = rsp.pkt_protected_cnt;
	ctx->stats.tx_sc_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;
	ctx->stats.tx_sc_stats->OutOctetsProtected = rsp.octet_protected_cnt;
	ctx->stats.tx_sc_stats->OutOctetsEncrypted = rsp.octet_encrypt_cnt;

	return 0;
}

static int cn10k_mdo_get_tx_sa_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sa_stats rsp = { 0 };
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_sa_stats(pfvf, txsc->hw_sa_id[sa_num], &rsp, MCS_TX, false);

	ctx->stats.tx_sa_stats->OutPktsProtected = rsp.pkt_protected_cnt;
	ctx->stats.tx_sa_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;

	return 0;
}

static int cn10k_mdo_get_rx_sc_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct mcs_sc_stats rsp = { 0 };
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &rsp, MCS_RX, true);

	rxsc->stats.InOctetsValidated += rsp.octet_validate_cnt;
	rxsc->stats.InOctetsDecrypted += rsp.octet_decrypt_cnt;

	rxsc->stats.InPktsInvalid += rsp.pkt_invalid_cnt;
	rxsc->stats.InPktsNotValid += rsp.pkt_notvalid_cnt;

	if (secy->protect_frames)
		rxsc->stats.InPktsLate += rsp.pkt_late_cnt;
	else
		rxsc->stats.InPktsDelayed += rsp.pkt_late_cnt;

	if (secy->validate_frames == MACSEC_VALIDATE_CHECK)
		rxsc->stats.InPktsUnchecked += rsp.pkt_unchecked_cnt;
	else
		rxsc->stats.InPktsOK += rsp.pkt_unchecked_cnt;

	ctx->stats.rx_sc_stats->InOctetsValidated = rxsc->stats.InOctetsValidated;
	ctx->stats.rx_sc_stats->InOctetsDecrypted = rxsc->stats.InOctetsDecrypted;
	ctx->stats.rx_sc_stats->InPktsInvalid = rxsc->stats.InPktsInvalid;
	ctx->stats.rx_sc_stats->InPktsNotValid = rxsc->stats.InPktsNotValid;
	ctx->stats.rx_sc_stats->InPktsLate = rxsc->stats.InPktsLate;
	ctx->stats.rx_sc_stats->InPktsDelayed = rxsc->stats.InPktsDelayed;
	ctx->stats.rx_sc_stats->InPktsUnchecked = rxsc->stats.InPktsUnchecked;
	ctx->stats.rx_sc_stats->InPktsOK = rxsc->stats.InPktsOK;

	return 0;
}

static int cn10k_mdo_get_rx_sa_stats(struct macsec_context *ctx)
{
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sa_stats rsp = { 0 };
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_sa_stats(pfvf, rxsc->hw_sa_id[sa_num], &rsp, MCS_RX, false);

	ctx->stats.rx_sa_stats->InPktsOK = rsp.pkt_ok_cnt;
	ctx->stats.rx_sa_stats->InPktsInvalid = rsp.pkt_invalid_cnt;
	ctx->stats.rx_sa_stats->InPktsNotValid = rsp.pkt_notvalid_cnt;
	ctx->stats.rx_sa_stats->InPktsNotUsingSA = rsp.pkt_nosaerror_cnt;
	ctx->stats.rx_sa_stats->InPktsUnusedSA = rsp.pkt_nosa_cnt;

	return 0;
}

static const struct macsec_ops cn10k_mcs_ops = {
	.mdo_dev_open = cn10k_mdo_open,
	.mdo_dev_stop = cn10k_mdo_stop,
	.mdo_add_secy = cn10k_mdo_add_secy,
	.mdo_upd_secy = cn10k_mdo_upd_secy,
	.mdo_del_secy = cn10k_mdo_del_secy,
	.mdo_add_rxsc = cn10k_mdo_add_rxsc,
	.mdo_upd_rxsc = cn10k_mdo_upd_rxsc,
	.mdo_del_rxsc = cn10k_mdo_del_rxsc,
	.mdo_add_rxsa = cn10k_mdo_add_rxsa,
	.mdo_upd_rxsa = cn10k_mdo_upd_rxsa,
	.mdo_del_rxsa = cn10k_mdo_del_rxsa,
	.mdo_add_txsa = cn10k_mdo_add_txsa,
	.mdo_upd_txsa = cn10k_mdo_upd_txsa,
	.mdo_del_txsa = cn10k_mdo_del_txsa,
	.mdo_get_dev_stats = cn10k_mdo_get_dev_stats,
	.mdo_get_tx_sc_stats = cn10k_mdo_get_tx_sc_stats,
	.mdo_get_tx_sa_stats = cn10k_mdo_get_tx_sa_stats,
	.mdo_get_rx_sc_stats = cn10k_mdo_get_rx_sc_stats,
	.mdo_get_rx_sa_stats = cn10k_mdo_get_rx_sa_stats,
};

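/* PN wrap interrupt handler: when the hardware reports that a TX SA's
 * packet number reached zero (XPN_EQ0), find the secy/SA owning the
 * expired hardware SA and let the MACsec core handle the PN wrap.
 */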
void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_tx_sa *sw_tx_sa = NULL;
	struct macsec_secy *secy = NULL;
	struct cn10k_mcs_txsc *txsc;
	u8 an;

	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return;

	if (!(event->intr_mask & MCS_CPM_TX_PACKET_XPN_EQ0_INT))
		return;

	/* Find the SecY to which the expired hardware SA is mapped */
	list_for_each_entry(txsc, &cfg->txsc_list, entry) {
		for (an = 0; an < CN10K_MCS_SA_PER_SC; an++)
			if (txsc->hw_sa_id[an] == event->sa_id) {
				secy = txsc->sw_secy;
				sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]);
			}
	}

	if (secy && sw_tx_sa)
		macsec_pn_wrapped(secy, sw_tx_sa);
}

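/* One-time MACsec setup: advertise NETIF_F_HW_MACSEC, register the
 * offload ops and ask the AF to deliver the TX PN-wrap interrupt.
 */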
int cn10k_mcs_init(struct otx2_nic *pfvf)
{
	struct mbox *mbox = &pfvf->mbox;
	struct cn10k_mcs_cfg *cfg;
	struct mcs_intr_cfg *req;

	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return 0;

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	INIT_LIST_HEAD(&cfg->txsc_list);
	INIT_LIST_HEAD(&cfg->rxsc_list);
	pfvf->macsec_cfg = cfg;

	pfvf->netdev->features |= NETIF_F_HW_MACSEC;
	pfvf->netdev->macsec_ops = &cn10k_mcs_ops;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_intr_cfg(mbox);
	if (!req)
		goto fail;

	req->intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;

	if (otx2_sync_mbox_msg(mbox))
		goto fail;

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	dev_err(pfvf->dev, "Cannot notify PN wrapped event\n");
	mutex_unlock(&mbox->lock);
	return 0;
}

void cn10k_mcs_free(struct otx2_nic *pfvf)
{
	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return;

	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, 0, true);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, 0, true);
	kfree(pfvf->macsec_cfg);
	pfvf->macsec_cfg = NULL;
}