// SPDX-License-Identifier: GPL-2.0
/* Marvell MACsec hardware offload driver
 *
 * Copyright (C) 2022 Marvell.
 */

#include <linux/rtnetlink.h>
#include <linux/bitfield.h>
#include <net/macsec.h>
#include "otx2_common.h"

#define MCS_TCAM0_MAC_DA_MASK		GENMASK_ULL(47, 0)
#define MCS_TCAM0_MAC_SA_MASK		GENMASK_ULL(63, 48)
#define MCS_TCAM1_MAC_SA_MASK		GENMASK_ULL(31, 0)
#define MCS_TCAM1_ETYPE_MASK		GENMASK_ULL(47, 32)

#define MCS_SA_MAP_MEM_SA_USE		BIT_ULL(9)

#define MCS_RX_SECY_PLCY_RW_MASK	GENMASK_ULL(49, 18)
#define MCS_RX_SECY_PLCY_RP		BIT_ULL(17)
#define MCS_RX_SECY_PLCY_AUTH_ENA	BIT_ULL(16)
#define MCS_RX_SECY_PLCY_CIP		GENMASK_ULL(8, 5)
#define MCS_RX_SECY_PLCY_VAL		GENMASK_ULL(2, 1)
#define MCS_RX_SECY_PLCY_ENA		BIT_ULL(0)

#define MCS_TX_SECY_PLCY_MTU		GENMASK_ULL(43, 28)
#define MCS_TX_SECY_PLCY_ST_TCI		GENMASK_ULL(27, 22)
#define MCS_TX_SECY_PLCY_ST_OFFSET	GENMASK_ULL(21, 15)
#define MCS_TX_SECY_PLCY_INS_MODE	BIT_ULL(14)
#define MCS_TX_SECY_PLCY_AUTH_ENA	BIT_ULL(13)
#define MCS_TX_SECY_PLCY_CIP		GENMASK_ULL(5, 2)
#define MCS_TX_SECY_PLCY_PROTECT	BIT_ULL(1)
#define MCS_TX_SECY_PLCY_ENA		BIT_ULL(0)

#define MCS_GCM_AES_128			0
#define MCS_GCM_AES_256			1
#define MCS_GCM_AES_XPN_128		2
#define MCS_GCM_AES_XPN_256		3

#define MCS_TCI_ES			0x40 /* end station */
#define MCS_TCI_SC			0x20 /* SCI present */
#define MCS_TCI_SCB			0x10 /* EPON */
#define MCS_TCI_E			0x08 /* encryption */
#define MCS_TCI_C			0x04 /* changed text */

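/* Look up the driver-side TX SC state for a kernel SecY. Each SecY added
 * through the offload API has exactly one cn10k_mcs_txsc entry on
 * cfg->txsc_list; the RX variant below additionally matches the RX SC.
 */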
static struct cn10k_mcs_txsc *cn10k_mcs_get_txsc(struct cn10k_mcs_cfg *cfg,
						 struct macsec_secy *secy)
{
	struct cn10k_mcs_txsc *txsc;

	list_for_each_entry(txsc, &cfg->txsc_list, entry) {
		if (txsc->sw_secy == secy)
			return txsc;
	}

	return NULL;
}

static struct cn10k_mcs_rxsc *cn10k_mcs_get_rxsc(struct cn10k_mcs_cfg *cfg,
						 struct macsec_secy *secy,
						 struct macsec_rx_sc *rx_sc)
{
	struct cn10k_mcs_rxsc *rxsc;

	list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
		if (rxsc->sw_rxsc == rx_sc && rxsc->sw_secy == secy)
			return rxsc;
	}

	return NULL;
}

static const char *rsrc_name(enum mcs_rsrc_type rsrc_type)
{
	switch (rsrc_type) {
	case MCS_RSRC_TYPE_FLOWID:
		return "FLOW";
	case MCS_RSRC_TYPE_SC:
		return "SC";
	case MCS_RSRC_TYPE_SECY:
		return "SECY";
	case MCS_RSRC_TYPE_SA:
		return "SA";
	default:
		return "Unknown";
	}
}

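/* Request a single resource of the given type (flow ID, SC, SECY or SA)
 * for one direction from the AF via mailbox and return its hardware
 * index in @rsrc_id.
 */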
static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
				enum mcs_rsrc_type type, u16 *rsrc_id)
{
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_alloc_rsrc_req *req;
	struct mcs_alloc_rsrc_rsp *rsp;
	int ret = -ENOMEM;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_alloc_resources(mbox);
	if (!req)
		goto fail;

	req->rsrc_type = type;
	req->rsrc_cnt  = 1;
	req->dir = dir;

	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_alloc_rsrc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
							     0, &req->hdr);
	if (IS_ERR(rsp) || req->rsrc_cnt != rsp->rsrc_cnt ||
	    req->rsrc_type != rsp->rsrc_type || req->dir != rsp->dir) {
		ret = -EINVAL;
		goto fail;
	}

	switch (rsp->rsrc_type) {
	case MCS_RSRC_TYPE_FLOWID:
		*rsrc_id = rsp->flow_ids[0];
		break;
	case MCS_RSRC_TYPE_SC:
		*rsrc_id = rsp->sc_ids[0];
		break;
	case MCS_RSRC_TYPE_SECY:
		*rsrc_id = rsp->secy_ids[0];
		break;
	case MCS_RSRC_TYPE_SA:
		*rsrc_id = rsp->sa_ids[0];
		break;
	default:
		ret = -EINVAL;
		goto fail;
	}

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	dev_err(pfvf->dev, "Failed to allocate %s %s resource\n",
		dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
	mutex_unlock(&mbox->lock);
	return ret;
}

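/* Return a resource to the AF, queueing a stats clear for it in the same
 * mailbox batch so the next user of that hardware index starts with zeroed
 * counters. When @all is set, every resource of @type in @dir is freed.
 */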
static void cn10k_mcs_free_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
				enum mcs_rsrc_type type, u16 hw_rsrc_id,
				bool all)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_free_rsrc_req *req;

	mutex_lock(&mbox->lock);

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req)
		goto fail;

	clear_req->id = hw_rsrc_id;
	clear_req->type = type;
	clear_req->dir = dir;

	req = otx2_mbox_alloc_msg_mcs_free_resources(mbox);
	if (!req)
		goto fail;

	req->rsrc_id = hw_rsrc_id;
	req->rsrc_type = type;
	req->dir = dir;
	if (all)
		req->all = 1;

	if (otx2_sync_mbox_msg(&pfvf->mbox))
		goto fail;

	mutex_unlock(&mbox->lock);

	return;
fail:
	dev_err(pfvf->dev, "Failed to free %s %s resource\n",
		dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
	mutex_unlock(&mbox->lock);
}

static int cn10k_mcs_alloc_txsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
{
	return cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id);
}

static int cn10k_mcs_alloc_rxsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
{
	return cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id);
}

static void cn10k_mcs_free_txsa(struct otx2_nic *pfvf, u16 hw_sa_id)
{
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
}

static void cn10k_mcs_free_rxsa(struct otx2_nic *pfvf, u16 hw_sa_id)
{
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
}

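/* Program the RX SECY policy: replay window and protection, AES-GCM-128
 * cipher, the SecY's frame validation mode and the enable bit.
 */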
static int cn10k_mcs_write_rx_secy(struct otx2_nic *pfvf,
				   struct macsec_secy *secy, u8 hw_secy_id)
{
	struct mcs_secy_plcy_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	u64 policy;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	policy = FIELD_PREP(MCS_RX_SECY_PLCY_RW_MASK, secy->replay_window);
	if (secy->replay_protect)
		policy |= MCS_RX_SECY_PLCY_RP;

	policy |= MCS_RX_SECY_PLCY_AUTH_ENA;
	policy |= FIELD_PREP(MCS_RX_SECY_PLCY_CIP, MCS_GCM_AES_128);
	policy |= FIELD_PREP(MCS_RX_SECY_PLCY_VAL, secy->validate_frames);

	policy |= MCS_RX_SECY_PLCY_ENA;

	req->plcy = policy;
	req->secy_id = hw_secy_id;
	req->dir = MCS_RX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

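/* Install the RX TCAM flow entry that steers traffic to this SC: match
 * on the netdev's MAC DA and the MACsec EtherType (0x88E5), and
 * wildcard everything else.
 */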
static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
				     struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
{
	struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
	struct macsec_secy *secy = rxsc->sw_secy;
	struct mcs_flowid_entry_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	u64 mac_da;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	mac_da = ether_addr_to_u64(secy->netdev->dev_addr);

	req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_DA_MASK, mac_da);
	req->mask[0] = ~0ULL;
	req->mask[0] &= ~MCS_TCAM0_MAC_DA_MASK;

	req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC);
	req->mask[1] = ~0ULL;
	req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK;

	req->mask[2] = ~0ULL;
	req->mask[3] = ~0ULL;

	req->flow_id = rxsc->hw_flow_id;
	req->secy_id = hw_secy_id;
	req->sc_id = rxsc->hw_sc_id;
	req->dir = MCS_RX;

	if (sw_rx_sc->active)
		req->ena = 1;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

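/* Bind the SC's SCI to its hardware SC index in the RX SC CAM so that
 * incoming SecTags can be matched to this channel.
 */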
static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf,
				  struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
{
	struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
	struct mcs_rx_sc_cam_write_req *sc_req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	sc_req = otx2_mbox_alloc_msg_mcs_rx_sc_cam_write(mbox);
	if (!sc_req) {
		ret = -ENOMEM;
		goto fail;
	}

	sc_req->sci = (__force u64)cpu_to_be64((__force u64)sw_rx_sc->sci);
	sc_req->sc_id = rxsc->hw_sc_id;
	sc_req->secy_id = hw_secy_id;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

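/* Write the RX SA key into the SA policy table and map the SA to its
 * parent SC and association number. Both requests are queued and sent
 * in a single mailbox batch.
 */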
static int cn10k_mcs_write_rx_sa_plcy(struct otx2_nic *pfvf,
				      struct macsec_secy *secy,
				      struct cn10k_mcs_rxsc *rxsc,
				      u8 assoc_num, bool sa_in_use)
{
	unsigned char *src = rxsc->sa_key[assoc_num];
	struct mcs_sa_plcy_write_req *plcy_req;
	struct mcs_rx_sc_sa_map *map_req;
	struct mbox *mbox = &pfvf->mbox;
	u8 reg, key_len;
	int ret;

	mutex_lock(&mbox->lock);

	plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
	if (!plcy_req) {
		ret = -ENOMEM;
		goto fail;
	}

	map_req = otx2_mbox_alloc_msg_mcs_rx_sc_sa_map_write(mbox);
	if (!map_req) {
		otx2_mbox_reset(&mbox->mbox, 0);
		ret = -ENOMEM;
		goto fail;
	}

	for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) {
		memcpy((u8 *)&plcy_req->plcy[0][reg], (src + reg * 8), 8);
		reg++;
	}

	plcy_req->sa_index[0] = rxsc->hw_sa_id[assoc_num];
	plcy_req->sa_cnt = 1;
	plcy_req->dir = MCS_RX;

	map_req->sa_index = rxsc->hw_sa_id[assoc_num];
	map_req->sa_in_use = sa_in_use;
	map_req->sc_id = rxsc->hw_sc_id;
	map_req->an = assoc_num;

	/* Send two messages together */
	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_rx_sa_pn(struct otx2_nic *pfvf,
				    struct cn10k_mcs_rxsc *rxsc,
				    u8 assoc_num, u64 next_pn)
{
	struct mcs_pn_table_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->pn_id = rxsc->hw_sa_id[assoc_num];
	req->next_pn = next_pn;
	req->dir = MCS_RX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

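/* Program the TX SECY policy: MTU, SecTag TCI bits, SecTag offset (after
 * the 12 byte DA+SA), AES-GCM-128 cipher and the protect/enable bits
 * derived from the kernel SecY state.
 */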
static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   struct cn10k_mcs_txsc *txsc)
{
	struct mcs_secy_plcy_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	struct macsec_tx_sc *sw_tx_sc;
	/* Insert SecTag after 12 bytes (DA+SA) */
	u8 tag_offset = 12;
	u8 sectag_tci = 0;
	u64 policy;
	int ret;

	sw_tx_sc = &secy->tx_sc;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	if (sw_tx_sc->send_sci) {
		sectag_tci |= MCS_TCI_SC;
	} else {
		if (sw_tx_sc->end_station)
			sectag_tci |= MCS_TCI_ES;
		if (sw_tx_sc->scb)
			sectag_tci |= MCS_TCI_SCB;
	}

	if (sw_tx_sc->encrypt)
		sectag_tci |= (MCS_TCI_E | MCS_TCI_C);

	policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu);
	/* Write SecTag excluding AN bits (1..0) */
	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);
	policy |= MCS_TX_SECY_PLCY_INS_MODE;
	policy |= MCS_TX_SECY_PLCY_AUTH_ENA;
	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_CIP, MCS_GCM_AES_128);

	if (secy->protect_frames)
		policy |= MCS_TX_SECY_PLCY_PROTECT;

	/* If the encoding SA does not exist or is not active and protect
	 * is not set, then frames can be sent out as is. Hence enable the
	 * policy irrespective of the secy being operational when !protect.
	 */
	if (!secy->protect_frames || secy->operational)
		policy |= MCS_TX_SECY_PLCY_ENA;

	req->plcy = policy;
	req->secy_id = txsc->hw_secy_id_tx;
	req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

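/* Install the TX TCAM flow entry matching the netdev's source MAC so
 * that egress traffic from this interface is steered to the SECY/SC
 * pair. The 48 bit MAC SA spans TCAM words 0 and 1.
 */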
static int cn10k_mcs_write_tx_flowid(struct otx2_nic *pfvf,
				     struct macsec_secy *secy,
				     struct cn10k_mcs_txsc *txsc)
{
	struct mcs_flowid_entry_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	u64 mac_sa;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	mac_sa = ether_addr_to_u64(secy->netdev->dev_addr);

	req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_SA_MASK, mac_sa);
	req->data[1] = FIELD_PREP(MCS_TCAM1_MAC_SA_MASK, mac_sa >> 16);

	req->mask[0] = ~0ULL;
	req->mask[0] &= ~MCS_TCAM0_MAC_SA_MASK;

	req->mask[1] = ~0ULL;
	req->mask[1] &= ~MCS_TCAM1_MAC_SA_MASK;

	req->mask[2] = ~0ULL;
	req->mask[3] = ~0ULL;

	req->flow_id = txsc->hw_flow_id;
	req->secy_id = txsc->hw_secy_id_tx;
	req->sc_id = txsc->hw_sc_id;
	req->sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
	req->dir = MCS_TX;
	/* This can be enabled since the stack xmits packets only when the interface is up */
	req->ena = 1;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

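/* Point the TX SC at its encoding SA in hardware. Writes for any SA
 * other than the SecY's current encoding SA are a no-op.
 */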
static int cn10k_mcs_link_tx_sa2sc(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   struct cn10k_mcs_txsc *txsc,
				   u8 sa_num, bool sa_active)
{
	struct mcs_tx_sc_sa_map *map_req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	/* Of all the SAs, link only the encoding SA to the SC */
	if (txsc->encoding_sa != sa_num)
		return 0;

	mutex_lock(&mbox->lock);

	map_req = otx2_mbox_alloc_msg_mcs_tx_sc_sa_map_write(mbox);
	if (!map_req) {
		otx2_mbox_reset(&mbox->mbox, 0);
		ret = -ENOMEM;
		goto fail;
	}

	map_req->sa_index0 = txsc->hw_sa_id[sa_num];
	map_req->sa_index0_vld = sa_active;
	map_req->sectag_sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
	map_req->sc_id = txsc->hw_sc_id;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_tx_sa_plcy(struct otx2_nic *pfvf,
				      struct macsec_secy *secy,
				      struct cn10k_mcs_txsc *txsc,
				      u8 assoc_num)
{
	unsigned char *src = txsc->sa_key[assoc_num];
	struct mcs_sa_plcy_write_req *plcy_req;
	struct mbox *mbox = &pfvf->mbox;
	u8 reg, key_len;
	int ret;

	mutex_lock(&mbox->lock);

	plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
	if (!plcy_req) {
		ret = -ENOMEM;
		goto fail;
	}

	for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) {
		memcpy((u8 *)&plcy_req->plcy[0][reg], (src + reg * 8), 8);
		reg++;
	}

	plcy_req->plcy[0][8] = assoc_num;
	plcy_req->sa_index[0] = txsc->hw_sa_id[assoc_num];
	plcy_req->sa_cnt = 1;
	plcy_req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_write_tx_sa_pn(struct otx2_nic *pfvf,
				struct cn10k_mcs_txsc *txsc,
				u8 assoc_num, u64 next_pn)
{
	struct mcs_pn_table_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->pn_id = txsc->hw_sa_id[assoc_num];
	req->next_pn = next_pn;
	req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

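/* Enable or disable a TCAM flow entry, turning hardware processing for
 * the corresponding SC on or off without freeing any resources.
 */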
static int cn10k_mcs_ena_dis_flowid(struct otx2_nic *pfvf, u16 hw_flow_id,
				    bool enable, enum mcs_direction dir)
{
	struct mcs_flowid_ena_dis_entry *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_ena_entry(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->flow_id = hw_flow_id;
	req->ena = enable;
	req->dir = dir;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

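/* Read SA statistics from the AF. When @clear is set, a clear-stats
 * request is queued in the same mailbox batch so the counters are read
 * and then reset in one round trip. The SC and SECY stats helpers below
 * follow the same pattern.
 */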
static int cn10k_mcs_sa_stats(struct otx2_nic *pfvf, u8 hw_sa_id,
			      struct mcs_sa_stats *rsp_p,
			      enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_stats_req *req;
	struct mcs_sa_stats *rsp;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_sa_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_sa_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_sa_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SA;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_sa_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
						       0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_sc_stats(struct otx2_nic *pfvf, u8 hw_sc_id,
			      struct mcs_sc_stats *rsp_p,
			      enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_stats_req *req;
	struct mcs_sc_stats *rsp;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_sc_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_sc_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_sc_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SC;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_sc_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
						       0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_secy_stats(struct otx2_nic *pfvf, u8 hw_secy_id,
				struct mcs_secy_stats *rsp_p,
				enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_secy_stats *rsp;
	struct mcs_stats_req *req;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_secy_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_secy_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_secy_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SECY;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_secy_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
							 0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

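/* Allocate the bundle of hardware resources backing one kernel SecY:
 * a TX flow ID, a TX SC and one SECY index per direction. Partial
 * allocations are rolled back on failure.
 */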
static struct cn10k_mcs_txsc *cn10k_mcs_create_txsc(struct otx2_nic *pfvf)
{
	struct cn10k_mcs_txsc *txsc;
	int ret;

	txsc = kzalloc(sizeof(*txsc), GFP_KERNEL);
	if (!txsc)
		return ERR_PTR(-ENOMEM);

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
				   &txsc->hw_flow_id);
	if (ret)
		goto fail;

	/* For a SecY, one TX secy and one RX secy HW resources are needed */
	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
				   &txsc->hw_secy_id_tx);
	if (ret)
		goto free_flowid;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
				   &txsc->hw_secy_id_rx);
	if (ret)
		goto free_tx_secy;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
				   &txsc->hw_sc_id);
	if (ret)
		goto free_rx_secy;

	return txsc;
free_rx_secy:
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_rx, false);
free_tx_secy:
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_tx, false);
free_flowid:
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
			    txsc->hw_flow_id, false);
fail:
	kfree(txsc);
	return ERR_PTR(ret);
}

/* Free TX SC and its SAs (if any) resources to AF */
static void cn10k_mcs_delete_txsc(struct otx2_nic *pfvf,
				  struct cn10k_mcs_txsc *txsc)
{
	u8 sa_bmap = txsc->sa_bmap;
	u8 sa_num = 0;

	while (sa_bmap) {
		if (sa_bmap & 1) {
			cn10k_mcs_write_tx_sa_plcy(pfvf, txsc->sw_secy,
						   txsc, sa_num);
			cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
		}
		sa_num++;
		sa_bmap >>= 1;
	}

	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
			    txsc->hw_sc_id, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_rx, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_tx, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
			    txsc->hw_flow_id, false);
}

static struct cn10k_mcs_rxsc *cn10k_mcs_create_rxsc(struct otx2_nic *pfvf)
{
	struct cn10k_mcs_rxsc *rxsc;
	int ret;

	rxsc = kzalloc(sizeof(*rxsc), GFP_KERNEL);
	if (!rxsc)
		return ERR_PTR(-ENOMEM);

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
				   &rxsc->hw_flow_id);
	if (ret)
		goto fail;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
				   &rxsc->hw_sc_id);
	if (ret)
		goto free_flowid;

	return rxsc;
free_flowid:
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
			    rxsc->hw_flow_id, false);
fail:
	kfree(rxsc);
	return ERR_PTR(ret);
}

/* Free RX SC and its SAs (if any) resources to AF */
static void cn10k_mcs_delete_rxsc(struct otx2_nic *pfvf,
				  struct cn10k_mcs_rxsc *rxsc)
{
	u8 sa_bmap = rxsc->sa_bmap;
	u8 sa_num = 0;

	while (sa_bmap) {
		if (sa_bmap & 1) {
			cn10k_mcs_write_rx_sa_plcy(pfvf, rxsc->sw_secy, rxsc,
						   sa_num, false);
			cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);
		}
		sa_num++;
		sa_bmap >>= 1;
	}

	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
			    rxsc->hw_sc_id, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
			    rxsc->hw_flow_id, false);
}

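/* (Re)program all TX-side state for a SecY: the optional encoding SA
 * (key, next PN and SC mapping), the TX SECY policy, the TX flow entry
 * and the paired RX SECY policy.
 */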
static int cn10k_mcs_secy_tx_cfg(struct otx2_nic *pfvf, struct macsec_secy *secy,
				 struct cn10k_mcs_txsc *txsc,
				 struct macsec_tx_sa *sw_tx_sa, u8 sa_num)
{
	if (sw_tx_sa) {
		cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
		cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
				     sw_tx_sa->next_pn_halves.lower);
		cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num,
					sw_tx_sa->active);
	}

	cn10k_mcs_write_tx_secy(pfvf, secy, txsc);
	cn10k_mcs_write_tx_flowid(pfvf, secy, txsc);
	/* When updating secy, change RX secy also */
	cn10k_mcs_write_rx_secy(pfvf, secy, txsc->hw_secy_id_rx);

	return 0;
}

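/* Walk the SecY's active RX SCs and replay their SAs, flow entries and
 * SC CAM bindings into hardware, e.g. when the interface is (re)opened.
 */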
static int cn10k_mcs_secy_rx_cfg(struct otx2_nic *pfvf,
				 struct macsec_secy *secy, u8 hw_secy_id)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *mcs_rx_sc;
	struct macsec_rx_sc *sw_rx_sc;
	struct macsec_rx_sa *sw_rx_sa;
	u8 sa_num;

	for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
	     sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
		mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
		if (unlikely(!mcs_rx_sc))
			continue;

		for (sa_num = 0; sa_num < CN10K_MCS_SA_PER_SC; sa_num++) {
			sw_rx_sa = rcu_dereference_bh(sw_rx_sc->sa[sa_num]);
			if (!sw_rx_sa)
				continue;

			cn10k_mcs_write_rx_sa_plcy(pfvf, secy, mcs_rx_sc,
						   sa_num, sw_rx_sa->active);
			cn10k_mcs_write_rx_sa_pn(pfvf, mcs_rx_sc, sa_num,
						 sw_rx_sa->next_pn_halves.lower);
		}

		cn10k_mcs_write_rx_flowid(pfvf, mcs_rx_sc, hw_secy_id);
		cn10k_mcs_write_sc_cam(pfvf, mcs_rx_sc, hw_secy_id);
	}

	return 0;
}

static int cn10k_mcs_disable_rxscs(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   bool delete)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *mcs_rx_sc;
	struct macsec_rx_sc *sw_rx_sc;
	int ret;

	for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
	     sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
		mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
		if (unlikely(!mcs_rx_sc))
			continue;

		ret = cn10k_mcs_ena_dis_flowid(pfvf, mcs_rx_sc->hw_flow_id,
					       false, MCS_RX);
		if (ret)
			dev_err(pfvf->dev, "Failed to disable TCAM for SC %d\n",
				mcs_rx_sc->hw_sc_id);
		if (delete) {
			cn10k_mcs_delete_rxsc(pfvf, mcs_rx_sc);
			list_del(&mcs_rx_sc->entry);
			kfree(mcs_rx_sc);
		}
	}

	return 0;
}

static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy,
				 struct cn10k_mcs_txsc *txsc)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_secy_stats rx_rsp = { 0 };
	struct mcs_sc_stats sc_rsp = { 0 };
	struct cn10k_mcs_rxsc *rxsc;

	/* Because of shared counters for some stats in the hardware, when
	 * updating the secy policy take a snapshot of the current stats and
	 * reset them. Below are the stats affected by the shared counters.
	 */

	/* Check if sync is really needed */
	if (secy->validate_frames == txsc->last_validate_frames &&
	    secy->replay_protect == txsc->last_replay_protect)
		return;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);

	txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
	txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
	txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
	if (txsc->last_validate_frames == MACSEC_VALIDATE_STRICT)
		txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
	else
		txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;

	list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
		cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &sc_rsp, MCS_RX, true);

		rxsc->stats.InOctetsValidated += sc_rsp.octet_validate_cnt;
		rxsc->stats.InOctetsDecrypted += sc_rsp.octet_decrypt_cnt;

		rxsc->stats.InPktsInvalid += sc_rsp.pkt_invalid_cnt;
		rxsc->stats.InPktsNotValid += sc_rsp.pkt_notvalid_cnt;

		if (txsc->last_replay_protect)
			rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt;
		else
			rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt;

		if (txsc->last_validate_frames == MACSEC_VALIDATE_DISABLED)
			rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt;
		else
			rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt;
	}

	txsc->last_validate_frames = secy->validate_frames;
	txsc->last_replay_protect = secy->replay_protect;
}

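/* The mdo_* callbacks below implement struct macsec_ops and are invoked
 * by the macsec core, which serializes them under the rtnl lock.
 */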
static int cn10k_mdo_open(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct macsec_tx_sa *sw_tx_sa;
	struct cn10k_mcs_txsc *txsc;
	u8 sa_num;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	sa_num = txsc->encoding_sa;
	sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);

	err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
	if (err)
		return err;

	return cn10k_mcs_secy_rx_cfg(pfvf, secy, txsc->hw_secy_id_rx);
}

static int cn10k_mdo_stop(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	err = cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
	if (err)
		return err;

	return cn10k_mcs_disable_rxscs(pfvf, ctx->secy, false);
}

static int cn10k_mdo_add_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_txsc *txsc;

	if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
		return -EOPNOTSUPP;

	/* Stick to a 16 byte key length until XPN support is added */
	if (secy->key_len != 16)
		return -EOPNOTSUPP;

	if (secy->xpn)
		return -EOPNOTSUPP;

	txsc = cn10k_mcs_create_txsc(pfvf);
	if (IS_ERR(txsc))
		return -ENOSPC;

	txsc->sw_secy = secy;
	txsc->encoding_sa = secy->tx_sc.encoding_sa;
	txsc->last_validate_frames = secy->validate_frames;
	txsc->last_replay_protect = secy->replay_protect;

	list_add(&txsc->entry, &cfg->txsc_list);

	if (netif_running(secy->netdev))
		return cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);

	return 0;
}

static int cn10k_mdo_upd_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct macsec_tx_sa *sw_tx_sa;
	struct cn10k_mcs_txsc *txsc;
	bool active;
	u8 sa_num;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	/* Encoding SA got changed */
	if (txsc->encoding_sa != secy->tx_sc.encoding_sa) {
		txsc->encoding_sa = secy->tx_sc.encoding_sa;
		sa_num = txsc->encoding_sa;
		sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
		active = sw_tx_sa ? sw_tx_sa->active : false;
		cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num, active);
	}

	if (netif_running(secy->netdev)) {
		cn10k_mcs_sync_stats(pfvf, secy, txsc);

		err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
	cn10k_mcs_disable_rxscs(pfvf, ctx->secy, true);
	cn10k_mcs_delete_txsc(pfvf, txsc);
	list_del(&txsc->entry);
	kfree(txsc);

	return 0;
}

static int cn10k_mdo_add_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (cn10k_mcs_alloc_txsa(pfvf, &txsc->hw_sa_id[sa_num]))
		return -ENOSPC;

	memcpy(&txsc->sa_key[sa_num], ctx->sa.key, secy->key_len);
	txsc->sa_bmap |= 1 << sa_num;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
		if (err)
			return err;

		err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
					   sw_tx_sa->next_pn_halves.lower);
		if (err)
			return err;

		err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
					      sa_num, sw_tx_sa->active);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (netif_running(secy->netdev)) {
		/* Keys cannot be changed after creation */
		err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
					   sw_tx_sa->next_pn_halves.lower);
		if (err)
			return err;

		err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
					      sa_num, sw_tx_sa->active);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
	txsc->sa_bmap &= ~(1 << sa_num);

	return 0;
}

static int cn10k_mdo_add_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_rxsc *rxsc;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	rxsc = cn10k_mcs_create_rxsc(pfvf);
	if (IS_ERR(rxsc))
		return -ENOSPC;

	rxsc->sw_secy = ctx->secy;
	rxsc->sw_rxsc = ctx->rx_sc;
	list_add(&rxsc->entry, &cfg->rxsc_list);

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_flowid(pfvf, rxsc, txsc->hw_secy_id_rx);
		if (err)
			return err;

		err = cn10k_mcs_write_sc_cam(pfvf, rxsc, txsc->hw_secy_id_rx);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	bool enable = ctx->rx_sc->active;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (netif_running(secy->netdev))
		return cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id,
						enable, MCS_RX);

	return 0;
}

static int cn10k_mdo_del_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id, false, MCS_RX);
	cn10k_mcs_delete_rxsc(pfvf, rxsc);
	list_del(&rxsc->entry);
	kfree(rxsc);

	return 0;
}

static int cn10k_mdo_add_rxsa(struct macsec_context *ctx)
{
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
	u64 next_pn = rx_sa->next_pn_halves.lower;
	struct macsec_secy *secy = ctx->secy;
	bool sa_in_use = rx_sa->active;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;
	int err;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (cn10k_mcs_alloc_rxsa(pfvf, &rxsc->hw_sa_id[sa_num]))
		return -ENOSPC;

	memcpy(&rxsc->sa_key[sa_num], ctx->sa.key, ctx->secy->key_len);
	rxsc->sa_bmap |= 1 << sa_num;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc,
						 sa_num, sa_in_use);
		if (err)
			return err;

		err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, next_pn);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx)
{
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
	u64 next_pn = rx_sa->next_pn_halves.lower;
	struct macsec_secy *secy = ctx->secy;
	bool sa_in_use = rx_sa->active;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;
	int err;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc, sa_num, sa_in_use);
		if (err)
			return err;

		err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, next_pn);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_rxsa(struct macsec_context *ctx)
{
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_write_rx_sa_plcy(pfvf, ctx->secy, rxsc, sa_num, false);
	cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);

	rxsc->sa_bmap &= ~(1 << sa_num);

	return 0;
}

static int cn10k_mdo_get_dev_stats(struct macsec_context *ctx)
{
	struct mcs_secy_stats tx_rsp = { 0 }, rx_rsp = { 0 };
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_tx, &tx_rsp, MCS_TX, false);
	ctx->stats.dev_stats->OutPktsUntagged = tx_rsp.pkt_untagged_cnt;
	ctx->stats.dev_stats->OutPktsTooLong = tx_rsp.pkt_toolong_cnt;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
	txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
	txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
	txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
	if (secy->validate_frames == MACSEC_VALIDATE_STRICT)
		txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
	else
		txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;
	txsc->stats.InPktsOverrun = 0;

	ctx->stats.dev_stats->InPktsNoTag = txsc->stats.InPktsNoTag;
	ctx->stats.dev_stats->InPktsUntagged = txsc->stats.InPktsUntagged;
	ctx->stats.dev_stats->InPktsBadTag = txsc->stats.InPktsBadTag;
	ctx->stats.dev_stats->InPktsUnknownSCI = txsc->stats.InPktsUnknownSCI;
	ctx->stats.dev_stats->InPktsNoSCI = txsc->stats.InPktsNoSCI;
	ctx->stats.dev_stats->InPktsOverrun = txsc->stats.InPktsOverrun;

	return 0;
}

static int cn10k_mdo_get_tx_sc_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sc_stats rsp = { 0 };
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_sc_stats(pfvf, txsc->hw_sc_id, &rsp, MCS_TX, false);

	ctx->stats.tx_sc_stats->OutPktsProtected = rsp.pkt_protected_cnt;
	ctx->stats.tx_sc_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;
	ctx->stats.tx_sc_stats->OutOctetsProtected = rsp.octet_protected_cnt;
	ctx->stats.tx_sc_stats->OutOctetsEncrypted = rsp.octet_encrypt_cnt;

	return 0;
}

static int cn10k_mdo_get_tx_sa_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sa_stats rsp = { 0 };
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_sa_stats(pfvf, txsc->hw_sa_id[sa_num], &rsp, MCS_TX, false);

	ctx->stats.tx_sa_stats->OutPktsProtected = rsp.pkt_protected_cnt;
	ctx->stats.tx_sa_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;

	return 0;
}

static int cn10k_mdo_get_rx_sc_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct mcs_sc_stats rsp = { 0 };
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &rsp, MCS_RX, true);

	rxsc->stats.InOctetsValidated += rsp.octet_validate_cnt;
	rxsc->stats.InOctetsDecrypted += rsp.octet_decrypt_cnt;

	rxsc->stats.InPktsInvalid += rsp.pkt_invalid_cnt;
	rxsc->stats.InPktsNotValid += rsp.pkt_notvalid_cnt;

	if (secy->replay_protect)
		rxsc->stats.InPktsLate += rsp.pkt_late_cnt;
	else
		rxsc->stats.InPktsDelayed += rsp.pkt_late_cnt;

	if (secy->validate_frames == MACSEC_VALIDATE_DISABLED)
		rxsc->stats.InPktsUnchecked += rsp.pkt_unchecked_cnt;
	else
		rxsc->stats.InPktsOK += rsp.pkt_unchecked_cnt;

	ctx->stats.rx_sc_stats->InOctetsValidated = rxsc->stats.InOctetsValidated;
	ctx->stats.rx_sc_stats->InOctetsDecrypted = rxsc->stats.InOctetsDecrypted;
	ctx->stats.rx_sc_stats->InPktsInvalid = rxsc->stats.InPktsInvalid;
	ctx->stats.rx_sc_stats->InPktsNotValid = rxsc->stats.InPktsNotValid;
	ctx->stats.rx_sc_stats->InPktsLate = rxsc->stats.InPktsLate;
	ctx->stats.rx_sc_stats->InPktsDelayed = rxsc->stats.InPktsDelayed;
	ctx->stats.rx_sc_stats->InPktsUnchecked = rxsc->stats.InPktsUnchecked;
	ctx->stats.rx_sc_stats->InPktsOK = rxsc->stats.InPktsOK;

	return 0;
}

static int cn10k_mdo_get_rx_sa_stats(struct macsec_context *ctx)
{
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sa_stats rsp = { 0 };
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_sa_stats(pfvf, rxsc->hw_sa_id[sa_num], &rsp, MCS_RX, false);

	ctx->stats.rx_sa_stats->InPktsOK = rsp.pkt_ok_cnt;
	ctx->stats.rx_sa_stats->InPktsInvalid = rsp.pkt_invalid_cnt;
	ctx->stats.rx_sa_stats->InPktsNotValid = rsp.pkt_notvalid_cnt;
	ctx->stats.rx_sa_stats->InPktsNotUsingSA = rsp.pkt_nosaerror_cnt;
	ctx->stats.rx_sa_stats->InPktsUnusedSA = rsp.pkt_nosa_cnt;

	return 0;
}

static const struct macsec_ops cn10k_mcs_ops = {
	.mdo_dev_open = cn10k_mdo_open,
	.mdo_dev_stop = cn10k_mdo_stop,
	.mdo_add_secy = cn10k_mdo_add_secy,
	.mdo_upd_secy = cn10k_mdo_upd_secy,
	.mdo_del_secy = cn10k_mdo_del_secy,
	.mdo_add_rxsc = cn10k_mdo_add_rxsc,
	.mdo_upd_rxsc = cn10k_mdo_upd_rxsc,
	.mdo_del_rxsc = cn10k_mdo_del_rxsc,
	.mdo_add_rxsa = cn10k_mdo_add_rxsa,
	.mdo_upd_rxsa = cn10k_mdo_upd_rxsa,
	.mdo_del_rxsa = cn10k_mdo_del_rxsa,
	.mdo_add_txsa = cn10k_mdo_add_txsa,
	.mdo_upd_txsa = cn10k_mdo_upd_txsa,
	.mdo_del_txsa = cn10k_mdo_del_txsa,
	.mdo_get_dev_stats = cn10k_mdo_get_dev_stats,
	.mdo_get_tx_sc_stats = cn10k_mdo_get_tx_sc_stats,
	.mdo_get_tx_sa_stats = cn10k_mdo_get_tx_sa_stats,
	.mdo_get_rx_sc_stats = cn10k_mdo_get_rx_sc_stats,
	.mdo_get_rx_sa_stats = cn10k_mdo_get_rx_sa_stats,
};

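/* Handle the PN-wrap interrupt from the MCS block: find the SecY owning
 * the expired hardware TX SA and notify the macsec core so the key can
 * be rotated.
 */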
void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_tx_sa *sw_tx_sa = NULL;
	struct macsec_secy *secy = NULL;
	struct cn10k_mcs_txsc *txsc;
	u8 an;

	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return;

	if (!(event->intr_mask & MCS_CPM_TX_PACKET_XPN_EQ0_INT))
		return;

	/* Find the SecY to which the expired hardware SA is mapped */
	list_for_each_entry(txsc, &cfg->txsc_list, entry) {
		for (an = 0; an < CN10K_MCS_SA_PER_SC; an++) {
			if (txsc->hw_sa_id[an] == event->sa_id) {
				secy = txsc->sw_secy;
				sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]);
			}
		}
	}

	if (secy && sw_tx_sa)
		macsec_pn_wrapped(secy, sw_tx_sa);
}

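/* Set up MACsec offload state for this PF/VF, advertise NETIF_F_HW_MACSEC
 * and subscribe to the TX PN-wrap interrupt. Failing to register the
 * interrupt is not fatal; offload continues without PN-wrap notifications.
 */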
int cn10k_mcs_init(struct otx2_nic *pfvf)
{
	struct mbox *mbox = &pfvf->mbox;
	struct cn10k_mcs_cfg *cfg;
	struct mcs_intr_cfg *req;

	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return 0;

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	INIT_LIST_HEAD(&cfg->txsc_list);
	INIT_LIST_HEAD(&cfg->rxsc_list);
	pfvf->macsec_cfg = cfg;

	pfvf->netdev->features |= NETIF_F_HW_MACSEC;
	pfvf->netdev->macsec_ops = &cn10k_mcs_ops;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_intr_cfg(mbox);
	if (!req)
		goto fail;

	req->intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;

	if (otx2_sync_mbox_msg(mbox))
		goto fail;

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	dev_err(pfvf->dev, "Cannot notify PN wrapped event\n");
	mutex_unlock(&mbox->lock);
	return 0;
}

void cn10k_mcs_free(struct otx2_nic *pfvf)
{
	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return;

	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, 0, true);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, 0, true);
	kfree(pfvf->macsec_cfg);
	pfvf->macsec_cfg = NULL;
}