1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2022 Marvell.
5  *
6  */
7 
8 #include <linux/bitfield.h>
9 #include <linux/module.h>
10 #include <linux/pci.h>
11 #include <linux/firmware.h>
12 #include <linux/stddef.h>
13 #include <linux/debugfs.h>
14 
15 #include "rvu_struct.h"
16 #include "rvu_reg.h"
17 #include "rvu.h"
18 #include "npc.h"
19 #include "cgx.h"
20 #include "rvu_npc_fs.h"
21 #include "rvu_npc_hash.h"
22 
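/* Extract a 'width_bits' wide field that starts at absolute bit position
 * 'start_bit' from an array of 64-bit words, handling fields that span two
 * adjacent words. For example, start_bit = 60 and width_bits = 8 combines
 * the top four bits of input[0] with the bottom four bits of input[1].
 */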
23 static u64 rvu_npc_wide_extract(const u64 input[], size_t start_bit,
24 				size_t width_bits)
25 {
26 	const u64 mask = ~(u64)((~(__uint128_t)0) << width_bits);
27 	const size_t msb = start_bit + width_bits - 1;
28 	const size_t lword = start_bit >> 6;
29 	const size_t uword = msb >> 6;
30 	size_t lbits;
31 	u64 hi, lo;
32 
33 	if (lword == uword)
34 		return (input[lword] >> (start_bit & 63)) & mask;
35 
36 	lbits = 64 - (start_bit & 63);
37 	hi = input[uword];
38 	lo = (input[lword] >> (start_bit & 63));
39 	return ((hi << lbits) | lo) & mask;
40 }
41 
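/* Shift the multi-word key left by one bit, carrying the MSB of each 64-bit
 * word into the LSB of the next higher word. Bits shifted out of the top
 * word are discarded.
 */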
42 static void rvu_npc_lshift_key(u64 *key, size_t key_bit_len)
43 {
44 	u64 prev_orig_word = 0;
45 	u64 cur_orig_word = 0;
46 	size_t extra = key_bit_len % 64;
47 	size_t max_idx = key_bit_len / 64;
48 	size_t i;
49 
50 	if (extra)
51 		max_idx++;
52 
53 	for (i = 0; i < max_idx; i++) {
54 		cur_orig_word = key[i];
55 		key[i] = key[i] << 1;
56 		key[i] |= ((prev_orig_word >> 63) & 0x1);
57 		prev_orig_word = cur_orig_word;
58 	}
59 }
60 
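/* Toeplitz hash: walk the data MSB first and, for every data bit that is
 * set, XOR the current top 32 bits of the key into the result, then shift
 * the whole key left by one bit. Note that the key array is modified in
 * place, so callers must pass a scratch copy of the secret key.
 */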
61 static u32 rvu_npc_toeplitz_hash(const u64 *data, u64 *key, size_t data_bit_len,
62 				 size_t key_bit_len)
63 {
64 	u32 hash_out = 0;
65 	u64 temp_data = 0;
66 	int i;
67 
68 	for (i = data_bit_len - 1; i >= 0; i--) {
69 		temp_data = (data[i / 64]);
70 		temp_data = temp_data >> (i % 64);
71 		temp_data &= 0x1;
72 		if (temp_data)
73 			hash_out ^= (u32)(rvu_npc_wide_extract(key, key_bit_len - 32, 32));
74 
75 		rvu_npc_lshift_key(key, key_bit_len);
76 	}
77 
78 	return hash_out;
79 }
80 
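/* Compute the 32-bit field hash that replaces a wide packet field in the
 * MCAM key. The three 64-bit secret key registers are packed into a 159-bit
 * Toeplitz key (KEY0, KEY1 and the low 31 bits of KEY2, most significant
 * first), the layer data is masked with the per-interface hash mask, and the
 * upper/lower 32 bits of hash_ctrl are then applied as an AND mask and an OR
 * value on the result.
 */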
81 u32 npc_field_hash_calc(u64 *ldata, struct npc_mcam_kex_hash *mkex_hash,
82 			u64 *secret_key, u8 intf, u8 hash_idx)
83 {
84 	u64 hash_key[3];
85 	u64 data_padded[2];
86 	u32 field_hash;
87 
88 	hash_key[0] = secret_key[1] << 31;
89 	hash_key[0] |= secret_key[2];
90 	hash_key[1] = secret_key[1] >> 33;
91 	hash_key[1] |= secret_key[0] << 31;
92 	hash_key[2] = secret_key[0] >> 33;
93 
94 	data_padded[0] = mkex_hash->hash_mask[intf][hash_idx][0] & ldata[0];
95 	data_padded[1] = mkex_hash->hash_mask[intf][hash_idx][1] & ldata[1];
96 	field_hash = rvu_npc_toeplitz_hash(data_padded, hash_key, 128, 159);
97 
98 	field_hash &= mkex_hash->hash_ctrl[intf][hash_idx] >> 32;
99 	field_hash |= mkex_hash->hash_ctrl[intf][hash_idx];
100 	return field_hash;
101 }
102 
103 static u64 npc_update_use_hash(int lt, int ld)
104 {
105 	u64 cfg = 0;
106 
107 	switch (lt) {
108 	case NPC_LT_LC_IP6:
109 		/* Update use_hash(bit-20) and bytesm1 (bit-16:19)
110 		 * in KEX_LD_CFG
111 		 */
112 		cfg = KEX_LD_CFG_USE_HASH(0x1, 0x03,
113 					  ld ? 0x8 : 0x18,
114 					  0x1, 0x0, 0x10);
115 		break;
116 	}
117 
118 	return cfg;
119 }
120 
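/* Walk every (LID, LT, LD) combination that the loaded mkex hash profile
 * marks as hash-enabled on this RX interface and program the corresponding
 * KEX and hash CSRs. At most NPC_MAX_HASH hash extractors are programmed
 * per interface.
 */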
121 static void npc_program_mkex_hash_rx(struct rvu *rvu, int blkaddr,
122 				     u8 intf)
123 {
124 	struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
125 	int lid, lt, ld, hash_cnt = 0;
126 
127 	if (is_npc_intf_tx(intf))
128 		return;
129 
130 	/* Program HASH_CFG */
131 	for (lid = 0; lid < NPC_MAX_LID; lid++) {
132 		for (lt = 0; lt < NPC_MAX_LT; lt++) {
133 			for (ld = 0; ld < NPC_MAX_LD; ld++) {
134 				if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
135 					u64 cfg = npc_update_use_hash(lt, ld);
136 
					if (hash_cnt == NPC_MAX_HASH)
						return;

					/* Set updated KEX configuration */
					SET_KEX_LD(intf, lid, lt, ld, cfg);
					/* Set HASH configuration */
					SET_KEX_LD_HASH(intf, ld,
							mkex_hash->hash[intf][ld]);
					SET_KEX_LD_HASH_MASK(intf, ld, 0,
							     mkex_hash->hash_mask[intf][ld][0]);
					SET_KEX_LD_HASH_MASK(intf, ld, 1,
							     mkex_hash->hash_mask[intf][ld][1]);
					SET_KEX_LD_HASH_CTRL(intf, ld,
							     mkex_hash->hash_ctrl[intf][ld]);
					hash_cnt++;
152 				}
153 			}
154 		}
155 	}
156 }
157 
158 static void npc_program_mkex_hash_tx(struct rvu *rvu, int blkaddr,
159 				     u8 intf)
160 {
161 	struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
162 	int lid, lt, ld, hash_cnt = 0;
163 
164 	if (is_npc_intf_rx(intf))
165 		return;
166 
167 	/* Program HASH_CFG */
168 	for (lid = 0; lid < NPC_MAX_LID; lid++) {
169 		for (lt = 0; lt < NPC_MAX_LT; lt++) {
170 			for (ld = 0; ld < NPC_MAX_LD; ld++)
171 				if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
172 					u64 cfg = npc_update_use_hash(lt, ld);
173 
					if (hash_cnt == NPC_MAX_HASH)
						return;

					/* Set updated KEX configuration */
					SET_KEX_LD(intf, lid, lt, ld, cfg);
					/* Set HASH configuration */
					SET_KEX_LD_HASH(intf, ld,
							mkex_hash->hash[intf][ld]);
					SET_KEX_LD_HASH_MASK(intf, ld, 0,
							     mkex_hash->hash_mask[intf][ld][0]);
					SET_KEX_LD_HASH_MASK(intf, ld, 1,
							     mkex_hash->hash_mask[intf][ld][1]);
					SET_KEX_LD_HASH_CTRL(intf, ld,
							     mkex_hash->hash_ctrl[intf][ld]);
					hash_cnt++;
192 				}
193 		}
194 	}
195 }
196 
197 void npc_config_secret_key(struct rvu *rvu, int blkaddr)
198 {
199 	struct hw_cap *hwcap = &rvu->hw->cap;
200 	struct rvu_hwinfo *hw = rvu->hw;
201 	u8 intf;
202 
203 	if (!hwcap->npc_hash_extract)
204 		return;
205 
206 	for (intf = 0; intf < hw->npc_intfs; intf++) {
207 		rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf),
208 			    RVU_NPC_HASH_SECRET_KEY0);
209 		rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf),
210 			    RVU_NPC_HASH_SECRET_KEY1);
211 		rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf),
212 			    RVU_NPC_HASH_SECRET_KEY2);
213 	}
214 }
215 
216 void npc_program_mkex_hash(struct rvu *rvu, int blkaddr)
217 {
218 	struct hw_cap *hwcap = &rvu->hw->cap;
219 	struct rvu_hwinfo *hw = rvu->hw;
220 	u8 intf;
221 
222 	if (!hwcap->npc_hash_extract)
223 		return;
224 
225 	for (intf = 0; intf < hw->npc_intfs; intf++) {
226 		npc_program_mkex_hash_rx(rvu, blkaddr, intf);
227 		npc_program_mkex_hash_tx(rvu, blkaddr, intf);
228 	}
229 }
230 
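/* For interfaces with hash extraction enabled, replace the 128-bit IPv6
 * source/destination address in the MCAM entry with its 32-bit field hash,
 * while preserving the original packet and mask values in opkt/omask so the
 * rule database still holds the unhashed fields.
 */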
231 void npc_update_field_hash(struct rvu *rvu, u8 intf,
232 			   struct mcam_entry *entry,
233 			   int blkaddr,
234 			   u64 features,
235 			   struct flow_msg *pkt,
236 			   struct flow_msg *mask,
237 			   struct flow_msg *opkt,
238 			   struct flow_msg *omask)
239 {
240 	struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
241 	struct npc_get_secret_key_req req;
242 	struct npc_get_secret_key_rsp rsp;
243 	u64 ldata[2], cfg;
244 	u32 field_hash;
245 	u8 hash_idx;
246 
247 	if (!rvu->hw->cap.npc_hash_extract) {
248 		dev_dbg(rvu->dev, "%s: Field hash extract feature is not supported\n", __func__);
249 		return;
250 	}
251 
252 	req.intf = intf;
253 	rvu_mbox_handler_npc_get_secret_key(rvu, &req, &rsp);
254 
255 	for (hash_idx = 0; hash_idx < NPC_MAX_HASH; hash_idx++) {
256 		cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_CFG(intf, hash_idx));
257 		if ((cfg & BIT_ULL(11)) && (cfg & BIT_ULL(12))) {
258 			u8 lid = (cfg & GENMASK_ULL(10, 8)) >> 8;
259 			u8 ltype = (cfg & GENMASK_ULL(7, 4)) >> 4;
260 			u8 ltype_mask = cfg & GENMASK_ULL(3, 0);
261 
262 			if (mkex_hash->lid_lt_ld_hash_en[intf][lid][ltype][hash_idx]) {
263 				switch (ltype & ltype_mask) {
				/* If hash extraction is enabled for IPv6, then the
				 * 128-bit IPv6 source or destination address is
				 * hashed down to a 32-bit value.
267 				 */
268 				case NPC_LT_LC_IP6:
269 					if (features & BIT_ULL(NPC_SIP_IPV6)) {
270 						u32 src_ip[IPV6_WORDS];
271 
272 						be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);
273 						ldata[0] = (u64)src_ip[0] << 32 | src_ip[1];
274 						ldata[1] = (u64)src_ip[2] << 32 | src_ip[3];
275 						field_hash = npc_field_hash_calc(ldata,
276 										 mkex_hash,
277 										 rsp.secret_key,
278 										 intf,
279 										 hash_idx);
280 						npc_update_entry(rvu, NPC_SIP_IPV6, entry,
281 								 field_hash, 0, 32, 0, intf);
282 						memcpy(&opkt->ip6src, &pkt->ip6src,
283 						       sizeof(pkt->ip6src));
284 						memcpy(&omask->ip6src, &mask->ip6src,
285 						       sizeof(mask->ip6src));
286 						break;
287 					}
288 
289 					if (features & BIT_ULL(NPC_DIP_IPV6)) {
290 						u32 dst_ip[IPV6_WORDS];
291 
292 						be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);
293 						ldata[0] = (u64)dst_ip[0] << 32 | dst_ip[1];
294 						ldata[1] = (u64)dst_ip[2] << 32 | dst_ip[3];
295 						field_hash = npc_field_hash_calc(ldata,
296 										 mkex_hash,
297 										 rsp.secret_key,
298 										 intf,
299 										 hash_idx);
300 						npc_update_entry(rvu, NPC_DIP_IPV6, entry,
301 								 field_hash, 0, 32, 0, intf);
302 						memcpy(&opkt->ip6dst, &pkt->ip6dst,
303 						       sizeof(pkt->ip6dst));
304 						memcpy(&omask->ip6dst, &mask->ip6dst,
305 						       sizeof(mask->ip6dst));
306 					}
307 					break;
308 				}
309 			}
310 		}
311 	}
312 }
313 
314 int rvu_mbox_handler_npc_get_secret_key(struct rvu *rvu,
315 					struct npc_get_secret_key_req *req,
316 					struct npc_get_secret_key_rsp *rsp)
317 {
318 	u64 *secret_key = rsp->secret_key;
319 	u8 intf = req->intf;
320 	int blkaddr;
321 
322 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
323 	if (blkaddr < 0) {
324 		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
325 		return -EINVAL;
326 	}
327 
328 	secret_key[0] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf));
329 	secret_key[1] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf));
330 	secret_key[2] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf));
331 
332 	return 0;
333 }
334 
335 /**
336  *	rvu_npc_exact_mac2u64 - utility function to convert mac address to u64.
337  *	@mac_addr: MAC address.
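 *
 *	For example, 00:11:22:33:44:55 is packed as 0x001122334455.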
338  *	Return: mdata for exact match table.
339  */
340 static u64 rvu_npc_exact_mac2u64(u8 *mac_addr)
341 {
342 	u64 mac = 0;
343 	int index;
344 
345 	for (index = ETH_ALEN - 1; index >= 0; index--)
346 		mac |= ((u64)*mac_addr++) << (8 * index);
347 
348 	return mac;
349 }
350 
351 /**
352  *	rvu_exact_prepare_mdata - Make mdata for mcam entry
353  *	@mac: MAC address
354  *	@chan: Channel number.
355  *	@ctype: Channel Type.
356  *	@mask: LDATA mask.
357  *	Return: Meta data
358  */
359 static u64 rvu_exact_prepare_mdata(u8 *mac, u16 chan, u16 ctype, u64 mask)
360 {
361 	u64 ldata = rvu_npc_exact_mac2u64(mac);
362 
363 	/* Please note that mask is 48bit which excludes chan and ctype.
364 	 * Increase mask bits if we need to include them as well.
365 	 */
366 	ldata |= ((u64)chan << 48);
367 	ldata |= ((u64)ctype  << 60);
368 	ldata &= mask;
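	/* The final shift by 2 is assumed to align the metadata with the
	 * search-key layout used by the exact match hardware.
	 */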
369 	ldata = ldata << 2;
370 
371 	return ldata;
372 }
373 
374 /**
375  *      rvu_exact_calculate_hash - calculate hash index to mem table.
376  *	@rvu: resource virtualization unit.
377  *	@chan: Channel number
378  *	@ctype: Channel type.
379  *	@mac: MAC address
380  *	@mask: HASH mask.
381  *	@table_depth: Depth of table.
382  *	Return: Hash value
383  */
384 static u32 rvu_exact_calculate_hash(struct rvu *rvu, u16 chan, u16 ctype, u8 *mac,
385 				    u64 mask, u32 table_depth)
386 {
387 	struct npc_exact_table *table = rvu->hw->table;
388 	u64 hash_key[2];
389 	u64 key_in[2];
390 	u64 ldata;
391 	u32 hash;
392 
393 	key_in[0] = RVU_NPC_HASH_SECRET_KEY0;
394 	key_in[1] = RVU_NPC_HASH_SECRET_KEY2;
395 
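	/* Pack the key into the 95-bit Toeplitz key used below: SECRET_KEY0
	 * forms the upper 64 bits and the low 31 bits of SECRET_KEY2 the
	 * remainder; SECRET_KEY1 is not used for the exact match hash.
	 */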
396 	hash_key[0] = key_in[0] << 31;
397 	hash_key[0] |= key_in[1];
398 	hash_key[1] = key_in[0] >> 33;
399 
400 	ldata = rvu_exact_prepare_mdata(mac, chan, ctype, mask);
401 
	dev_dbg(rvu->dev, "%s: ldata=0x%llx hash_key1=0x%llx hash_key0=0x%llx\n", __func__,
		ldata, hash_key[1], hash_key[0]);
404 	hash = rvu_npc_toeplitz_hash(&ldata, (u64 *)hash_key, 64, 95);
405 
406 	hash &= table->mem_table.hash_mask;
407 	hash += table->mem_table.hash_offset;
408 	dev_dbg(rvu->dev, "%s: hash=%x\n", __func__,  hash);
409 
410 	return hash;
411 }
412 
413 /**
414  *      rvu_npc_exact_alloc_mem_table_entry - find free entry in 4 way table.
415  *      @rvu: resource virtualization unit.
416  *	@way: Indicate way to table.
417  *	@index: Hash index to 4 way table.
418  *	@hash: Hash value.
419  *
 *	Searches the 4 way table using the hash index.
 *	Return: 0 upon success.
422  */
423 static int rvu_npc_exact_alloc_mem_table_entry(struct rvu *rvu, u8 *way,
424 					       u32 *index, unsigned int hash)
425 {
426 	struct npc_exact_table *table;
427 	int depth, i;
428 
429 	table = rvu->hw->table;
430 	depth = table->mem_table.depth;
431 
432 	/* Check all the 4 ways for a free slot. */
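	/* All ways share a single bitmap: way 'i' occupies bits
	 * [i * depth, (i + 1) * depth), so each hash bucket maps to one bit
	 * per way.
	 */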
433 	mutex_lock(&table->lock);
434 	for (i = 0; i <  table->mem_table.ways; i++) {
435 		if (test_bit(hash + i * depth, table->mem_table.bmap))
436 			continue;
437 
438 		set_bit(hash + i * depth, table->mem_table.bmap);
439 		mutex_unlock(&table->lock);
440 
441 		dev_dbg(rvu->dev, "%s: mem table entry alloc success (way=%d index=%d)\n",
442 			__func__, i, hash);
443 
444 		*way = i;
445 		*index = hash;
446 		return 0;
447 	}
448 	mutex_unlock(&table->lock);
449 
	dev_dbg(rvu->dev, "%s: No space in 4 way exact match table, weight=%u\n", __func__,
		bitmap_weight(table->mem_table.bmap,
			      table->mem_table.depth * table->mem_table.ways));
452 	return -ENOSPC;
453 }
454 
455 /**
 *	rvu_npc_exact_free_id - Free seq id from bitmap.
457  *	@rvu: Resource virtualization unit.
458  *	@seq_id: Sequence identifier to be freed.
459  */
460 static void rvu_npc_exact_free_id(struct rvu *rvu, u32 seq_id)
461 {
462 	struct npc_exact_table *table;
463 
464 	table = rvu->hw->table;
465 	mutex_lock(&table->lock);
466 	clear_bit(seq_id, table->id_bmap);
467 	mutex_unlock(&table->lock);
468 	dev_dbg(rvu->dev, "%s: freed id %d\n", __func__, seq_id);
469 }
470 
471 /**
472  *	rvu_npc_exact_alloc_id - Alloc seq id from bitmap.
473  *	@rvu: Resource virtualization unit.
474  *	@seq_id: Sequence identifier.
 *	Return: True upon success, false if no free id is available.
476  */
477 static bool rvu_npc_exact_alloc_id(struct rvu *rvu, u32 *seq_id)
478 {
479 	struct npc_exact_table *table;
480 	u32 idx;
481 
482 	table = rvu->hw->table;
483 
484 	mutex_lock(&table->lock);
485 	idx = find_first_zero_bit(table->id_bmap, table->tot_ids);
486 	if (idx == table->tot_ids) {
487 		mutex_unlock(&table->lock);
488 		dev_err(rvu->dev, "%s: No space in id bitmap (%d)\n",
489 			__func__, table->tot_ids);
490 
491 		return false;
492 	}
493 
494 	/* Mark bit map to indicate that slot is used.*/
495 	set_bit(idx, table->id_bmap);
496 	mutex_unlock(&table->lock);
497 
498 	*seq_id = idx;
499 	dev_dbg(rvu->dev, "%s: Allocated id (%d)\n", __func__, *seq_id);
500 
501 	return true;
502 }
503 
504 /**
505  *      rvu_npc_exact_alloc_cam_table_entry - find free slot in fully associative table.
506  *      @rvu: resource virtualization unit.
507  *	@index: Index to exact CAM table.
508  *	Return: 0 upon success; else error number.
509  */
510 static int rvu_npc_exact_alloc_cam_table_entry(struct rvu *rvu, int *index)
511 {
512 	struct npc_exact_table *table;
513 	u32 idx;
514 
515 	table = rvu->hw->table;
516 
517 	mutex_lock(&table->lock);
518 	idx = find_first_zero_bit(table->cam_table.bmap, table->cam_table.depth);
519 	if (idx == table->cam_table.depth) {
520 		mutex_unlock(&table->lock);
521 		dev_info(rvu->dev, "%s: No space in exact cam table, weight=%u\n", __func__,
522 			 bitmap_weight(table->cam_table.bmap, table->cam_table.depth));
523 		return -ENOSPC;
524 	}
525 
526 	/* Mark bit map to indicate that slot is used.*/
527 	set_bit(idx, table->cam_table.bmap);
528 	mutex_unlock(&table->lock);
529 
530 	*index = idx;
531 	dev_dbg(rvu->dev, "%s: cam table entry alloc success (index=%d)\n",
532 		__func__, idx);
533 	return 0;
534 }
535 
536 /**
537  *	rvu_exact_prepare_table_entry - Data for exact match table entry.
538  *	@rvu: Resource virtualization unit.
539  *	@enable: Enable/Disable entry
540  *	@ctype: Software defined channel type. Currently set as 0.
541  *	@chan: Channel number.
542  *	@mac_addr: Destination mac address.
543  *	Return: mdata for exact match table.
544  */
545 static u64 rvu_exact_prepare_table_entry(struct rvu *rvu, bool enable,
					 u8 ctype, u16 chan, u8 *mac_addr)
{
549 	u64 ldata = rvu_npc_exact_mac2u64(mac_addr);
550 
551 	/* Enable or disable */
552 	u64 mdata = FIELD_PREP(GENMASK_ULL(63, 63), enable ? 1 : 0);
553 
554 	/* Set Ctype */
555 	mdata |= FIELD_PREP(GENMASK_ULL(61, 60), ctype);
556 
557 	/* Set chan */
558 	mdata |= FIELD_PREP(GENMASK_ULL(59, 48), chan);
559 
560 	/* MAC address */
561 	mdata |= FIELD_PREP(GENMASK_ULL(47, 0), ldata);
562 
563 	return mdata;
564 }
565 
566 /**
567  *	rvu_exact_config_secret_key - Configure secret key.
568  *	@rvu: Resource virtualization unit.
569  */
570 static void rvu_exact_config_secret_key(struct rvu *rvu)
571 {
572 	int blkaddr;
573 
574 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
575 	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET0(NIX_INTF_RX),
576 		    RVU_NPC_HASH_SECRET_KEY0);
577 
578 	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET1(NIX_INTF_RX),
579 		    RVU_NPC_HASH_SECRET_KEY1);
580 
581 	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET2(NIX_INTF_RX),
582 		    RVU_NPC_HASH_SECRET_KEY2);
583 }
584 
585 /**
586  *	rvu_exact_config_search_key - Configure search key
587  *	@rvu: Resource virtualization unit.
588  */
589 static void rvu_exact_config_search_key(struct rvu *rvu)
590 {
591 	int blkaddr;
592 	u64 reg_val;
593 
594 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
595 
596 	/* HDR offset */
597 	reg_val = FIELD_PREP(GENMASK_ULL(39, 32), 0);
598 
599 	/* BYTESM1, number of bytes - 1 */
600 	reg_val |= FIELD_PREP(GENMASK_ULL(18, 16), ETH_ALEN - 1);
601 
602 	/* Enable LID and set LID to  NPC_LID_LA */
603 	reg_val |= FIELD_PREP(GENMASK_ULL(11, 11), 1);
604 	reg_val |= FIELD_PREP(GENMASK_ULL(10, 8),  NPC_LID_LA);
605 
606 	/* Clear layer type based extraction */
607 
608 	/* Disable LT_EN */
609 	reg_val |= FIELD_PREP(GENMASK_ULL(12, 12), 0);
610 
611 	/* Set LTYPE_MATCH to 0 */
612 	reg_val |= FIELD_PREP(GENMASK_ULL(7, 4), 0);
613 
614 	/* Set LTYPE_MASK to 0 */
615 	reg_val |= FIELD_PREP(GENMASK_ULL(3, 0), 0);
616 
617 	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_CFG(NIX_INTF_RX), reg_val);
618 }
619 
620 /**
621  *	rvu_exact_config_result_ctrl - Set exact table hash control
622  *	@rvu: Resource virtualization unit.
623  *	@depth: Depth of Exact match table.
624  *
625  *	Sets mask and offset for hash for mem table.
626  */
627 static void rvu_exact_config_result_ctrl(struct rvu *rvu, uint32_t depth)
628 {
629 	int blkaddr;
630 	u64 reg = 0;
631 
632 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
633 
634 	/* Set mask. Note that depth is a power of 2 */
635 	rvu->hw->table->mem_table.hash_mask = (depth - 1);
636 	reg |= FIELD_PREP(GENMASK_ULL(42, 32), (depth - 1));
637 
638 	/* Set offset as 0 */
639 	rvu->hw->table->mem_table.hash_offset = 0;
640 	reg |= FIELD_PREP(GENMASK_ULL(10, 0), 0);
641 
642 	/* Set reg for RX */
643 	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_RESULT_CTL(NIX_INTF_RX), reg);
644 	/* Store hash mask and offset for s/w algorithm */
645 }
646 
647 /**
648  *	rvu_exact_config_table_mask - Set exact table mask.
649  *	@rvu: Resource virtualization unit.
650  */
651 static void rvu_exact_config_table_mask(struct rvu *rvu)
652 {
653 	int blkaddr;
654 	u64 mask = 0;
655 
656 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
657 
658 	/* Don't use Ctype */
659 	mask |= FIELD_PREP(GENMASK_ULL(61, 60), 0);
660 
661 	/* Set chan */
662 	mask |= GENMASK_ULL(59, 48);
663 
664 	/* Full ldata */
665 	mask |= GENMASK_ULL(47, 0);
666 
667 	/* Store mask for s/w hash calcualtion */
668 	rvu->hw->table->mem_table.mask = mask;
669 
670 	/* Set mask for RX.*/
671 	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_MASK(NIX_INTF_RX), mask);
672 }
673 
674 /**
675  *      rvu_npc_exact_get_max_entries - Get total number of entries in table.
676  *      @rvu: resource virtualization unit.
677  *	Return: Maximum table entries possible.
678  */
679 u32 rvu_npc_exact_get_max_entries(struct rvu *rvu)
680 {
681 	struct npc_exact_table *table;
682 
683 	table = rvu->hw->table;
684 	return table->tot_ids;
685 }
686 
687 /**
688  *      rvu_npc_exact_has_match_table - Checks support for exact match.
689  *      @rvu: resource virtualization unit.
690  *	Return: True if exact match table is supported/enabled.
691  */
692 bool rvu_npc_exact_has_match_table(struct rvu *rvu)
693 {
694 	return  rvu->hw->cap.npc_exact_match_enabled;
695 }
696 
697 /**
698  *      __rvu_npc_exact_find_entry_by_seq_id - find entry by id
699  *      @rvu: resource virtualization unit.
700  *	@seq_id: Sequence identifier.
701  *
702  *	Caller should acquire the lock.
703  *	Return: Pointer to table entry.
704  */
705 static struct npc_exact_table_entry *
706 __rvu_npc_exact_find_entry_by_seq_id(struct rvu *rvu, u32 seq_id)
707 {
708 	struct npc_exact_table *table = rvu->hw->table;
709 	struct npc_exact_table_entry *entry = NULL;
710 	struct list_head *lhead;
711 
712 	lhead = &table->lhead_gbl;
713 
714 	/* traverse to find the matching entry */
715 	list_for_each_entry(entry, lhead, glist) {
716 		if (entry->seq_id != seq_id)
717 			continue;
718 
719 		return entry;
720 	}
721 
722 	return NULL;
723 }
724 
725 /**
726  *      rvu_npc_exact_add_to_list - Add entry to list
727  *      @rvu: resource virtualization unit.
728  *	@opc_type: OPCODE to select MEM/CAM table.
729  *	@ways: MEM table ways.
730  *	@index: Index in MEM/CAM table.
731  *	@cgx_id: CGX identifier.
732  *	@lmac_id: LMAC identifier.
733  *	@mac_addr: MAC address.
734  *	@chan: Channel number.
735  *	@ctype: Channel Type.
736  *	@seq_id: Sequence identifier
737  *	@cmd: True if function is called by ethtool cmd
738  *	@mcam_idx: NPC mcam index of DMAC entry in NPC mcam.
739  *	@pcifunc: pci function
740  *	Return: 0 upon success.
741  */
742 static int rvu_npc_exact_add_to_list(struct rvu *rvu, enum npc_exact_opc_type opc_type, u8 ways,
743 				     u32 index, u8 cgx_id, u8 lmac_id, u8 *mac_addr, u16 chan,
744 				     u8 ctype, u32 *seq_id, bool cmd, u32 mcam_idx, u16 pcifunc)
745 {
746 	struct npc_exact_table_entry *entry, *tmp, *iter;
747 	struct npc_exact_table *table = rvu->hw->table;
748 	struct list_head *lhead, *pprev;
749 
750 	WARN_ON(ways >= NPC_EXACT_TBL_MAX_WAYS);
751 
752 	if (!rvu_npc_exact_alloc_id(rvu, seq_id)) {
753 		dev_err(rvu->dev, "%s: Generate seq id failed\n", __func__);
754 		return -EFAULT;
755 	}
756 
757 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
758 	if (!entry) {
759 		rvu_npc_exact_free_id(rvu, *seq_id);
760 		dev_err(rvu->dev, "%s: Memory allocation failed\n", __func__);
761 		return -ENOMEM;
762 	}
763 
764 	mutex_lock(&table->lock);
765 	switch (opc_type) {
766 	case NPC_EXACT_OPC_CAM:
767 		lhead = &table->lhead_cam_tbl_entry;
768 		table->cam_tbl_entry_cnt++;
769 		break;
770 
771 	case NPC_EXACT_OPC_MEM:
772 		lhead = &table->lhead_mem_tbl_entry[ways];
773 		table->mem_tbl_entry_cnt++;
774 		break;
775 
776 	default:
777 		mutex_unlock(&table->lock);
778 		kfree(entry);
779 		rvu_npc_exact_free_id(rvu, *seq_id);
780 
		dev_err(rvu->dev, "%s: Unknown opc type %d\n", __func__, opc_type);
782 		return  -EINVAL;
783 	}
784 
785 	/* Add to global list */
786 	INIT_LIST_HEAD(&entry->glist);
787 	list_add_tail(&entry->glist, &table->lhead_gbl);
788 	INIT_LIST_HEAD(&entry->list);
789 	entry->index = index;
790 	entry->ways = ways;
791 	entry->opc_type = opc_type;
792 
793 	entry->pcifunc = pcifunc;
794 
795 	ether_addr_copy(entry->mac, mac_addr);
796 	entry->chan = chan;
797 	entry->ctype = ctype;
798 	entry->cgx_id = cgx_id;
799 	entry->lmac_id = lmac_id;
800 
801 	entry->seq_id = *seq_id;
802 
803 	entry->mcam_idx = mcam_idx;
804 	entry->cmd = cmd;
805 
806 	pprev = lhead;
807 
808 	/* Insert entry in ascending order of index */
809 	list_for_each_entry_safe(iter, tmp, lhead, list) {
810 		if (index < iter->index)
811 			break;
812 
813 		pprev = &iter->list;
814 	}
815 
816 	/* Add to each table list */
817 	list_add(&entry->list, pprev);
818 	mutex_unlock(&table->lock);
819 	return 0;
820 }
821 
822 /**
823  *	rvu_npc_exact_mem_table_write - Wrapper for register write
824  *	@rvu: resource virtualization unit.
825  *	@blkaddr: Block address
826  *	@ways: ways for MEM table.
827  *	@index: Index in MEM
828  *	@mdata: Meta data to be written to register.
829  */
830 static void rvu_npc_exact_mem_table_write(struct rvu *rvu, int blkaddr, u8 ways,
831 					  u32 index, u64 mdata)
832 {
833 	rvu_write64(rvu, blkaddr, NPC_AF_EXACT_MEM_ENTRY(ways, index), mdata);
834 }
835 
836 /**
837  *	rvu_npc_exact_cam_table_write - Wrapper for register write
838  *	@rvu: resource virtualization unit.
839  *	@blkaddr: Block address
840  *	@index: Index in MEM
841  *	@mdata: Meta data to be written to register.
842  */
843 static void rvu_npc_exact_cam_table_write(struct rvu *rvu, int blkaddr,
844 					  u32 index, u64 mdata)
845 {
846 	rvu_write64(rvu, blkaddr, NPC_AF_EXACT_CAM_ENTRY(index), mdata);
847 }
848 
849 /**
850  *      rvu_npc_exact_dealloc_table_entry - dealloc table entry
851  *      @rvu: resource virtualization unit.
852  *	@opc_type: OPCODE for selection of table(MEM or CAM)
853  *	@ways: ways if opc_type is MEM table.
854  *	@index: Index of MEM or CAM table.
855  *	Return: 0 upon success.
856  */
857 static int rvu_npc_exact_dealloc_table_entry(struct rvu *rvu, enum npc_exact_opc_type opc_type,
858 					     u8 ways, u32 index)
859 {
860 	int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
861 	struct npc_exact_table *table;
862 	u8 null_dmac[6] = { 0 };
863 	int depth;
864 
865 	/* Prepare entry with all fields set to zero */
866 	u64 null_mdata = rvu_exact_prepare_table_entry(rvu, false, 0, 0, null_dmac);
867 
868 	table = rvu->hw->table;
869 	depth = table->mem_table.depth;
870 
871 	mutex_lock(&table->lock);
872 
873 	switch (opc_type) {
874 	case NPC_EXACT_OPC_CAM:
875 
876 		/* Check whether entry is used already */
877 		if (!test_bit(index, table->cam_table.bmap)) {
878 			mutex_unlock(&table->lock);
879 			dev_err(rvu->dev, "%s: Trying to free an unused entry ways=%d index=%d\n",
880 				__func__, ways, index);
881 			return -EINVAL;
882 		}
883 
884 		rvu_npc_exact_cam_table_write(rvu, blkaddr, index, null_mdata);
885 		clear_bit(index, table->cam_table.bmap);
886 		break;
887 
888 	case NPC_EXACT_OPC_MEM:
889 
890 		/* Check whether entry is used already */
891 		if (!test_bit(index + ways * depth, table->mem_table.bmap)) {
892 			mutex_unlock(&table->lock);
893 			dev_err(rvu->dev, "%s: Trying to free an unused entry index=%d\n",
894 				__func__, index);
895 			return -EINVAL;
896 		}
897 
898 		rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, null_mdata);
899 		clear_bit(index + ways * depth, table->mem_table.bmap);
900 		break;
901 
902 	default:
903 		mutex_unlock(&table->lock);
		dev_err(rvu->dev, "%s: invalid opc type %d\n", __func__, opc_type);
905 		return -ENOSPC;
906 	}
907 
908 	mutex_unlock(&table->lock);
909 
	dev_dbg(rvu->dev, "%s: Successfully deleted entry (index=%d, ways=%d, opc_type=%d)\n",
911 		__func__, index,  ways, opc_type);
912 
913 	return 0;
914 }
915 
916 /**
 *	rvu_npc_exact_alloc_table_entry - Allocate an entry
918  *      @rvu: resource virtualization unit.
919  *	@mac: MAC address.
920  *	@chan: Channel number.
921  *	@ctype: Channel Type.
922  *	@index: Index of MEM table or CAM table.
923  *	@ways: Ways. Only valid for MEM table.
924  *	@opc_type: OPCODE to select table (MEM or CAM)
925  *
 *	Try allocating a slot in the MEM table. If all 4 way
 *	slots are full for the hash index, check for a free slot
 *	in the 32-entry CAM table.
929  *	Return: 0 upon success.
930  */
931 static int rvu_npc_exact_alloc_table_entry(struct rvu *rvu,  char *mac, u16 chan, u8 ctype,
932 					   u32 *index, u8 *ways, enum npc_exact_opc_type *opc_type)
933 {
934 	struct npc_exact_table *table;
935 	unsigned int hash;
936 	int err;
937 
938 	table = rvu->hw->table;
939 
	/* Check the 4-way mem table for a free slot */
941 	hash =  rvu_exact_calculate_hash(rvu, chan, ctype, mac, table->mem_table.mask,
942 					 table->mem_table.depth);
943 	err = rvu_npc_exact_alloc_mem_table_entry(rvu, ways, index, hash);
944 	if (!err) {
945 		*opc_type = NPC_EXACT_OPC_MEM;
946 		dev_dbg(rvu->dev, "%s: inserted in 4 ways hash table ways=%d, index=%d\n",
947 			__func__, *ways, *index);
948 		return 0;
949 	}
950 
951 	dev_dbg(rvu->dev, "%s: failed to insert in 4 ways hash table\n", __func__);
952 
	/* ways is 0 for the cam table */
954 	*ways = 0;
955 	err = rvu_npc_exact_alloc_cam_table_entry(rvu, index);
956 	if (!err) {
957 		*opc_type = NPC_EXACT_OPC_CAM;
958 		dev_dbg(rvu->dev, "%s: inserted in fully associative hash table index=%u\n",
959 			__func__, *index);
960 		return 0;
961 	}
962 
963 	dev_err(rvu->dev, "%s: failed to insert in fully associative hash table\n", __func__);
964 	return -ENOSPC;
965 }
966 
967 /**
 *	rvu_npc_exact_save_drop_rule_chan_and_mask - Save drop rule info in the database.
969  *      @rvu: resource virtualization unit.
970  *	@drop_mcam_idx: Drop rule index in NPC mcam.
971  *	@chan_val: Channel value.
972  *	@chan_mask: Channel Mask.
973  *	@pcifunc: pcifunc of interface.
974  *	Return: True upon success.
975  */
976 static bool rvu_npc_exact_save_drop_rule_chan_and_mask(struct rvu *rvu, int drop_mcam_idx,
977 						       u64 chan_val, u64 chan_mask, u16 pcifunc)
978 {
979 	struct npc_exact_table *table;
980 	int i;
981 
982 	table = rvu->hw->table;
983 
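	/* Stop at the first free slot; a valid entry that already matches this
	 * channel value and mask means the drop rule is a duplicate and is not
	 * saved again.
	 */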
984 	for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
985 		if (!table->drop_rule_map[i].valid)
986 			break;
987 
988 		if (table->drop_rule_map[i].chan_val != (u16)chan_val)
989 			continue;
990 
991 		if (table->drop_rule_map[i].chan_mask != (u16)chan_mask)
992 			continue;
993 
994 		return false;
995 	}
996 
997 	if (i == NPC_MCAM_DROP_RULE_MAX)
998 		return false;
999 
1000 	table->drop_rule_map[i].drop_rule_idx = drop_mcam_idx;
1001 	table->drop_rule_map[i].chan_val = (u16)chan_val;
1002 	table->drop_rule_map[i].chan_mask = (u16)chan_mask;
1003 	table->drop_rule_map[i].pcifunc = pcifunc;
1004 	table->drop_rule_map[i].valid = true;
1005 	return true;
1006 }
1007 
1008 /**
1009  *	rvu_npc_exact_calc_drop_rule_chan_and_mask - Calculate Channel number and mask.
1010  *      @rvu: resource virtualization unit.
 *	@intf_type: Interface type (SDP, LBK or CGX)
 *	@cgx_id: CGX identifier.
 *	@lmac_id: LMAC identifier.
1014  *	@val: Channel number.
1015  *	@mask: Channel mask.
1016  *	Return: True upon success.
1017  */
1018 static bool rvu_npc_exact_calc_drop_rule_chan_and_mask(struct rvu *rvu, u8 intf_type,
1019 						       u8 cgx_id, u8 lmac_id,
1020 						       u64 *val, u64 *mask)
1021 {
1022 	u16 chan_val, chan_mask;
1023 
1024 	/* No support for SDP and LBK */
1025 	if (intf_type != NIX_INTF_TYPE_CGX)
1026 		return false;
1027 
1028 	chan_val = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
1029 	chan_mask = 0xfff;
1030 
1031 	if (val)
1032 		*val = chan_val;
1033 
1034 	if (mask)
1035 		*mask = chan_mask;
1036 
1037 	return true;
1038 }
1039 
1040 /**
1041  *	rvu_npc_exact_drop_rule_to_pcifunc - Retrieve pcifunc
1042  *      @rvu: resource virtualization unit.
1043  *	@drop_rule_idx: Drop rule index in NPC mcam.
1044  *
 *	Debugfs (exact_drop_cnt) entry displays the pcifunc for an interface
 *	by retrieving the pcifunc value from the database.
 *	Return: pcifunc of the interface owning the drop rule.
1048  */
1049 u16 rvu_npc_exact_drop_rule_to_pcifunc(struct rvu *rvu, u32 drop_rule_idx)
1050 {
1051 	struct npc_exact_table *table;
1052 	int i;
1053 
1054 	table = rvu->hw->table;
1055 
1056 	for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
1057 		if (!table->drop_rule_map[i].valid)
1058 			break;
1059 
1060 		if (table->drop_rule_map[i].drop_rule_idx != drop_rule_idx)
1061 			continue;
1062 
1063 		return table->drop_rule_map[i].pcifunc;
1064 	}
1065 
	dev_err(rvu->dev, "%s: drop mcam rule index (%d) not found in drop rule map\n",
		__func__, drop_rule_idx);
1068 	return -1;
1069 }
1070 
1071 /**
1072  *	rvu_npc_exact_get_drop_rule_info - Get drop rule information.
1073  *      @rvu: resource virtualization unit.
1074  *	@intf_type: Interface type (CGX, SDP or LBK)
1075  *	@cgx_id: CGX identifier.
1076  *	@lmac_id: LMAC identifier.
1077  *	@drop_mcam_idx: NPC mcam drop rule index.
1078  *	@val: Channel value.
1079  *	@mask: Channel mask.
1080  *	@pcifunc: pcifunc of interface corresponding to the drop rule.
1081  *	Return: True upon success.
1082  */
1083 static bool rvu_npc_exact_get_drop_rule_info(struct rvu *rvu, u8 intf_type, u8 cgx_id,
1084 					     u8 lmac_id, u32 *drop_mcam_idx, u64 *val,
1085 					     u64 *mask, u16 *pcifunc)
1086 {
1087 	struct npc_exact_table *table;
1088 	u64 chan_val, chan_mask;
1089 	bool rc;
1090 	int i;
1091 
1092 	table = rvu->hw->table;
1093 
1094 	if (intf_type != NIX_INTF_TYPE_CGX) {
1095 		dev_err(rvu->dev, "%s: No drop rule for LBK/SDP mode\n", __func__);
1096 		return false;
1097 	}
1098 
1099 	rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, intf_type, cgx_id,
1100 							lmac_id, &chan_val, &chan_mask);
1101 	if (!rc)
1102 		return false;
1103 
1104 	for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
1105 		if (!table->drop_rule_map[i].valid)
1106 			break;
1107 
1108 		if (table->drop_rule_map[i].chan_val != (u16)chan_val)
1109 			continue;
1110 
1111 		if (val)
1112 			*val = table->drop_rule_map[i].chan_val;
1113 		if (mask)
1114 			*mask = table->drop_rule_map[i].chan_mask;
1115 		if (pcifunc)
1116 			*pcifunc = table->drop_rule_map[i].pcifunc;
1117 
1118 		*drop_mcam_idx = i;
1119 		return true;
1120 	}
1121 
	if (i == NPC_MCAM_DROP_RULE_MAX) {
		dev_err(rvu->dev, "%s: drop rule map is full (%d rules)\n",
			__func__, NPC_MCAM_DROP_RULE_MAX);
1125 		return false;
1126 	}
1127 
1128 	dev_err(rvu->dev, "%s: Could not retrieve for cgx=%d, lmac=%d\n",
1129 		__func__, cgx_id, lmac_id);
1130 	return false;
1131 }
1132 
1133 /**
1134  *	__rvu_npc_exact_cmd_rules_cnt_update - Update number dmac rules against a drop rule.
1135  *      @rvu: resource virtualization unit.
1136  *	@drop_mcam_idx: NPC mcam drop rule index.
1137  *	@val: +1 or -1.
1138  *	@enable_or_disable_cam: If no exact match rules against a drop rule, disable it.
1139  *
 *	When the first exact match entry against a drop rule is added, or the last
 *	exact match entry against a drop rule is deleted, *enable_or_disable_cam
 *	is set to true.
1143  *	Return: Number of rules
1144  */
1145 static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_idx,
1146 						int val, bool *enable_or_disable_cam)
1147 {
1148 	struct npc_exact_table *table;
1149 	u16 *cnt, old_cnt;
1150 	bool promisc;
1151 
1152 	table = rvu->hw->table;
1153 	promisc = table->promisc_mode[drop_mcam_idx];
1154 
1155 	cnt = &table->cnt_cmd_rules[drop_mcam_idx];
1156 	old_cnt = *cnt;
1157 
1158 	*cnt += val;
1159 
1160 	if (!enable_or_disable_cam)
1161 		goto done;
1162 
1163 	*enable_or_disable_cam = false;
1164 
1165 	if (promisc)
1166 		goto done;
1167 
1168 	/* If all rules are deleted and not already in promisc mode; disable cam */
1169 	if (!*cnt && val < 0) {
1170 		*enable_or_disable_cam = true;
1171 		goto done;
1172 	}
1173 
1174 	/* If rule got added and not already in promisc mode; enable cam */
1175 	if (!old_cnt && val > 0) {
1176 		*enable_or_disable_cam = true;
1177 		goto done;
1178 	}
1179 
1180 done:
1181 	return *cnt;
1182 }
1183 
1184 /**
1185  *      rvu_npc_exact_del_table_entry_by_id - Delete and free table entry.
1186  *      @rvu: resource virtualization unit.
1187  *	@seq_id: Sequence identifier of the entry.
1188  *
1189  *	Deletes entry from linked lists and free up slot in HW MEM or CAM
1190  *	table.
1191  *	Return: 0 upon success.
1192  */
1193 static int rvu_npc_exact_del_table_entry_by_id(struct rvu *rvu, u32 seq_id)
1194 {
1195 	struct npc_exact_table_entry *entry = NULL;
1196 	struct npc_exact_table *table;
1197 	bool disable_cam = false;
1198 	u32 drop_mcam_idx = -1;
1199 	int *cnt;
1200 	bool rc;
1201 
1202 	table = rvu->hw->table;
1203 
1204 	mutex_lock(&table->lock);
1205 
1206 	/* Lookup for entry which needs to be updated */
1207 	entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, seq_id);
1208 	if (!entry) {
1209 		dev_dbg(rvu->dev, "%s: failed to find entry for id=%d\n", __func__, seq_id);
1210 		mutex_unlock(&table->lock);
1211 		return -ENODATA;
1212 	}
1213 
1214 	cnt = (entry->opc_type == NPC_EXACT_OPC_CAM) ? &table->cam_tbl_entry_cnt :
1215 				&table->mem_tbl_entry_cnt;
1216 
1217 	/* delete from lists */
1218 	list_del_init(&entry->list);
1219 	list_del_init(&entry->glist);
1220 
1221 	(*cnt)--;
1222 
1223 	rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, entry->cgx_id,
1224 					      entry->lmac_id, &drop_mcam_idx, NULL, NULL, NULL);
1225 	if (!rc) {
1226 		dev_dbg(rvu->dev, "%s: failed to retrieve drop info for id=0x%x\n",
1227 			__func__, seq_id);
1228 		mutex_unlock(&table->lock);
1229 		return -ENODATA;
1230 	}
1231 
1232 	if (entry->cmd)
1233 		__rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, -1, &disable_cam);
1234 
1235 	/* No dmac filter rules; disable drop on hit rule */
1236 	if (disable_cam) {
1237 		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
1238 		dev_dbg(rvu->dev, "%s: Disabling mcam idx %d\n",
1239 			__func__, drop_mcam_idx);
1240 	}
1241 
1242 	mutex_unlock(&table->lock);
1243 
1244 	rvu_npc_exact_dealloc_table_entry(rvu, entry->opc_type, entry->ways, entry->index);
1245 
1246 	rvu_npc_exact_free_id(rvu, seq_id);
1247 
	dev_dbg(rvu->dev, "%s: delete entry success for id=0x%x, mac=%pM\n",
1249 		__func__, seq_id, entry->mac);
1250 	kfree(entry);
1251 
1252 	return 0;
1253 }
1254 
1255 /**
1256  *      rvu_npc_exact_add_table_entry - Adds a table entry
1257  *      @rvu: resource virtualization unit.
1258  *	@cgx_id: cgx identifier.
1259  *	@lmac_id: lmac identifier.
1260  *	@mac: MAC address.
1261  *	@chan: Channel number.
1262  *	@ctype: Channel Type.
1263  *	@seq_id: Sequence number.
1264  *	@cmd: Whether it is invoked by ethtool cmd.
1265  *	@mcam_idx: NPC mcam index corresponding to MAC
1266  *	@pcifunc: PCI func.
1267  *
1268  *	Creates a new exact match table entry in either CAM or
1269  *	MEM table.
1270  *	Return: 0 upon success.
1271  */
1272 static int rvu_npc_exact_add_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id, u8 *mac,
1273 					 u16 chan, u8 ctype, u32 *seq_id, bool cmd,
1274 					 u32 mcam_idx, u16 pcifunc)
1275 {
1276 	int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
1277 	enum npc_exact_opc_type opc_type;
1278 	bool enable_cam = false;
1279 	u32 drop_mcam_idx;
1280 	u32 index;
1281 	u64 mdata;
1282 	bool rc;
1283 	int err;
1284 	u8 ways;
1285 
1286 	ctype = 0;
1287 
1288 	err = rvu_npc_exact_alloc_table_entry(rvu, mac, chan, ctype, &index, &ways, &opc_type);
1289 	if (err) {
1290 		dev_err(rvu->dev, "%s: Could not alloc in exact match table\n", __func__);
1291 		return err;
1292 	}
1293 
1294 	/* Write mdata to table */
1295 	mdata = rvu_exact_prepare_table_entry(rvu, true, ctype, chan, mac);
1296 
1297 	if (opc_type == NPC_EXACT_OPC_CAM)
1298 		rvu_npc_exact_cam_table_write(rvu, blkaddr, index, mdata);
1299 	else
1300 		rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index,  mdata);
1301 
1302 	/* Insert entry to linked list */
1303 	err = rvu_npc_exact_add_to_list(rvu, opc_type, ways, index, cgx_id, lmac_id,
1304 					mac, chan, ctype, seq_id, cmd, mcam_idx, pcifunc);
1305 	if (err) {
1306 		rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
1307 		dev_err(rvu->dev, "%s: could not add to exact match table\n", __func__);
1308 		return err;
1309 	}
1310 
1311 	rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
1312 					      &drop_mcam_idx, NULL, NULL, NULL);
1313 	if (!rc) {
1314 		rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
1315 		dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
1316 			__func__, cgx_id, lmac_id);
1317 		return -EINVAL;
1318 	}
1319 
1320 	if (cmd)
1321 		__rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 1, &enable_cam);
1322 
1323 	/* First command rule; enable drop on hit rule */
1324 	if (enable_cam) {
1325 		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, true);
1326 		dev_dbg(rvu->dev, "%s: Enabling mcam idx %d\n",
1327 			__func__, drop_mcam_idx);
1328 	}
1329 
1330 	dev_dbg(rvu->dev,
		"%s: Successfully added entry (index=%d, dmac=%pM, ways=%d, opc_type=%d)\n",
1332 		__func__, index, mac, ways, opc_type);
1333 
1334 	return 0;
1335 }
1336 
1337 /**
1338  *      rvu_npc_exact_update_table_entry - Update exact match table.
1339  *      @rvu: resource virtualization unit.
1340  *	@cgx_id: CGX identifier.
1341  *	@lmac_id: LMAC identifier.
1342  *	@old_mac: Existing MAC address entry.
1343  *	@new_mac: New MAC address entry.
1344  *	@seq_id: Sequence identifier of the entry.
1345  *
1346  *	Updates MAC address of an entry. If entry is in MEM table, new
1347  *	hash value may not match with old one.
1348  *	Return: 0 upon success.
1349  */
1350 static int rvu_npc_exact_update_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id,
1351 					    u8 *old_mac, u8 *new_mac, u32 *seq_id)
1352 {
1353 	int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
1354 	struct npc_exact_table_entry *entry;
1355 	struct npc_exact_table *table;
1356 	u32 hash_index;
1357 	u64 mdata;
1358 
1359 	table = rvu->hw->table;
1360 
1361 	mutex_lock(&table->lock);
1362 
1363 	/* Lookup for entry which needs to be updated */
1364 	entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, *seq_id);
1365 	if (!entry) {
1366 		mutex_unlock(&table->lock);
1367 		dev_dbg(rvu->dev,
1368 			"%s: failed to find entry for cgx_id=%d lmac_id=%d old_mac=%pM\n",
1369 			__func__, cgx_id, lmac_id, old_mac);
1370 		return -ENODATA;
1371 	}
1372 
1373 	/* If entry is in mem table and new hash index is different than old
1374 	 * hash index, we cannot update the entry. Fail in these scenarios.
1375 	 */
1376 	if (entry->opc_type == NPC_EXACT_OPC_MEM) {
1377 		hash_index =  rvu_exact_calculate_hash(rvu, entry->chan, entry->ctype,
1378 						       new_mac, table->mem_table.mask,
1379 						       table->mem_table.depth);
1380 		if (hash_index != entry->index) {
1381 			dev_dbg(rvu->dev,
1382 				"%s: Update failed due to index mismatch(new=0x%x, old=%x)\n",
1383 				__func__, hash_index, entry->index);
1384 			mutex_unlock(&table->lock);
1385 			return -EINVAL;
1386 		}
1387 	}
1388 
1389 	mdata = rvu_exact_prepare_table_entry(rvu, true, entry->ctype, entry->chan, new_mac);
1390 
1391 	if (entry->opc_type == NPC_EXACT_OPC_MEM)
1392 		rvu_npc_exact_mem_table_write(rvu, blkaddr, entry->ways, entry->index, mdata);
1393 	else
1394 		rvu_npc_exact_cam_table_write(rvu, blkaddr, entry->index, mdata);
1395 
1396 	/* Update entry fields */
1397 	ether_addr_copy(entry->mac, new_mac);
1398 	*seq_id = entry->seq_id;
1399 
	dev_dbg(rvu->dev,
		"%s: Successfully updated entry (index=%d, dmac=%pM, ways=%d, opc_type=%d)\n",
		__func__, entry->index, entry->mac, entry->ways, entry->opc_type);

	dev_dbg(rvu->dev, "%s: Successfully updated entry (old mac=%pM, new mac=%pM)\n",
		__func__, old_mac, new_mac);
1406 
1407 	mutex_unlock(&table->lock);
1408 	return 0;
1409 }
1410 
1411 /**
1412  *	rvu_npc_exact_promisc_disable - Disable promiscuous mode.
1413  *      @rvu: resource virtualization unit.
1414  *	@pcifunc: pcifunc
1415  *
 *	Drop rules are per PF. We don't support DMAC filters for
 *	VFs.
1418  *	Return: 0 upon success
1419  */
1420 
1421 int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
1422 {
1423 	struct npc_exact_table *table;
1424 	int pf = rvu_get_pf(pcifunc);
1425 	u8 cgx_id, lmac_id;
1426 	u32 drop_mcam_idx;
1427 	bool *promisc;
1428 	bool rc;
1429 	u32 cnt;
1430 
1431 	table = rvu->hw->table;
1432 
1433 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1434 	rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
1435 					      &drop_mcam_idx, NULL, NULL, NULL);
1436 	if (!rc) {
1437 		dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
1438 			__func__, cgx_id, lmac_id);
1439 		return -EINVAL;
1440 	}
1441 
1442 	mutex_lock(&table->lock);
1443 	promisc = &table->promisc_mode[drop_mcam_idx];
1444 
1445 	if (!*promisc) {
1446 		mutex_unlock(&table->lock);
		dev_dbg(rvu->dev, "%s: Error: promisc mode already disabled (cgx=%d lmac=%d)\n",
1448 			__func__, cgx_id, lmac_id);
1449 		return LMAC_AF_ERR_INVALID_PARAM;
1450 	}
1451 	*promisc = false;
1452 	cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
1453 	mutex_unlock(&table->lock);
1454 
1455 	/* If no dmac filter entries configured, disable drop rule */
1456 	if (!cnt)
1457 		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
1458 	else
1459 		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);
1460 
1461 	dev_dbg(rvu->dev, "%s: disabled  promisc mode (cgx=%d lmac=%d, cnt=%d)\n",
1462 		__func__, cgx_id, lmac_id, cnt);
1463 	return 0;
1464 }
1465 
1466 /**
1467  *	rvu_npc_exact_promisc_enable - Enable promiscuous mode.
1468  *      @rvu: resource virtualization unit.
1469  *	@pcifunc: pcifunc.
1470  *	Return: 0 upon success
1471  */
1472 int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
1473 {
1474 	struct npc_exact_table *table;
1475 	int pf = rvu_get_pf(pcifunc);
1476 	u8 cgx_id, lmac_id;
1477 	u32 drop_mcam_idx;
1478 	bool *promisc;
1479 	bool rc;
1480 	u32 cnt;
1481 
1482 	table = rvu->hw->table;
1483 
1484 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1485 	rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
1486 					      &drop_mcam_idx, NULL, NULL, NULL);
1487 	if (!rc) {
1488 		dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
1489 			__func__, cgx_id, lmac_id);
1490 		return -EINVAL;
1491 	}
1492 
1493 	mutex_lock(&table->lock);
1494 	promisc = &table->promisc_mode[drop_mcam_idx];
1495 
1496 	if (*promisc) {
1497 		mutex_unlock(&table->lock);
1498 		dev_dbg(rvu->dev, "%s: Already in promisc mode (cgx=%d lmac=%d)\n",
1499 			__func__, cgx_id, lmac_id);
1500 		return LMAC_AF_ERR_INVALID_PARAM;
1501 	}
1502 	*promisc = true;
1503 	cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
1504 	mutex_unlock(&table->lock);
1505 
1506 	/* If no dmac filter entries configured, disable drop rule */
1507 	if (!cnt)
1508 		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
1509 	else
1510 		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);
1511 
1512 	dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d cnt=%d)\n",
1513 		__func__, cgx_id, lmac_id, cnt);
1514 	return 0;
1515 }
1516 
1517 /**
1518  *	rvu_npc_exact_mac_addr_reset - Delete PF mac address.
1519  *      @rvu: resource virtualization unit.
1520  *	@req: Reset request
1521  *	@rsp: Reset response.
1522  *	Return: 0 upon success
1523  */
1524 int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
1525 				 struct msg_rsp *rsp)
1526 {
1527 	int pf = rvu_get_pf(req->hdr.pcifunc);
1528 	u32 seq_id = req->index;
1529 	struct rvu_pfvf *pfvf;
1530 	u8 cgx_id, lmac_id;
1531 	int rc;
1532 
1533 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1534 
1535 	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
1536 
1537 	rc = rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
1538 	if (rc) {
1539 		/* TODO: how to handle this error case ? */
1540 		dev_err(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__, pfvf->mac_addr, pf);
1541 		return 0;
1542 	}
1543 
1544 	dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d success (seq_id=%u)\n",
1545 		__func__, pfvf->mac_addr, pf, seq_id);
1546 	return 0;
1547 }
1548 
1549 /**
1550  *	rvu_npc_exact_mac_addr_update - Update mac address field with new value.
1551  *      @rvu: resource virtualization unit.
1552  *	@req: Update request.
1553  *	@rsp: Update response.
1554  *	Return: 0 upon success
1555  */
1556 int rvu_npc_exact_mac_addr_update(struct rvu *rvu,
1557 				  struct cgx_mac_addr_update_req *req,
1558 				  struct cgx_mac_addr_update_rsp *rsp)
1559 {
1560 	int pf = rvu_get_pf(req->hdr.pcifunc);
1561 	struct npc_exact_table_entry *entry;
1562 	struct npc_exact_table *table;
1563 	struct rvu_pfvf *pfvf;
1564 	u32 seq_id, mcam_idx;
1565 	u8 old_mac[ETH_ALEN];
1566 	u8 cgx_id, lmac_id;
1567 	int rc;
1568 
1569 	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
1570 		return LMAC_AF_ERR_PERM_DENIED;
1571 
1572 	dev_dbg(rvu->dev, "%s: Update request for seq_id=%d, mac=%pM\n",
1573 		__func__, req->index, req->mac_addr);
1574 
1575 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1576 
1577 	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
1578 
1579 	table = rvu->hw->table;
1580 
1581 	mutex_lock(&table->lock);
1582 
1583 	/* Lookup for entry which needs to be updated */
1584 	entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, req->index);
1585 	if (!entry) {
1586 		dev_err(rvu->dev, "%s: failed to find entry for id=0x%x\n", __func__, req->index);
1587 		mutex_unlock(&table->lock);
1588 		return LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED;
1589 	}
1590 	ether_addr_copy(old_mac, entry->mac);
1591 	seq_id = entry->seq_id;
1592 	mcam_idx = entry->mcam_idx;
1593 	mutex_unlock(&table->lock);
1594 
1595 	rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id,  old_mac,
1596 					      req->mac_addr, &seq_id);
1597 	if (!rc) {
1598 		rsp->index = seq_id;
1599 		dev_dbg(rvu->dev, "%s  mac:%pM (pfvf:%pM default:%pM) update to PF=%d success\n",
1600 			__func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf);
1601 		ether_addr_copy(pfvf->mac_addr, req->mac_addr);
1602 		return 0;
1603 	}
1604 
1605 	/* Try deleting and adding it again */
1606 	rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
1607 	if (rc) {
1608 		/* This could be a new entry */
1609 		dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__,
1610 			pfvf->mac_addr, pf);
1611 	}
1612 
1613 	rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
1614 					   pfvf->rx_chan_base, 0, &seq_id, true,
1615 					   mcam_idx, req->hdr.pcifunc);
1616 	if (rc) {
1617 		dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n", __func__,
1618 			req->mac_addr, pf);
1619 		return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
1620 	}
1621 
1622 	rsp->index = seq_id;
1623 	dev_dbg(rvu->dev,
1624 		"%s MAC (new:%pM, old=%pM default:%pM) del and add to PF=%d success (seq_id=%u)\n",
1625 		__func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf, seq_id);
1626 
1627 	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
1628 	return 0;
1629 }
1630 
1631 /**
1632  *	rvu_npc_exact_mac_addr_add - Adds MAC address to exact match table.
1633  *      @rvu: resource virtualization unit.
1634  *	@req: Add request.
1635  *	@rsp: Add response.
1636  *	Return: 0 upon success
1637  */
1638 int rvu_npc_exact_mac_addr_add(struct rvu *rvu,
1639 			       struct cgx_mac_addr_add_req *req,
1640 			       struct cgx_mac_addr_add_rsp *rsp)
1641 {
1642 	int pf = rvu_get_pf(req->hdr.pcifunc);
1643 	struct rvu_pfvf *pfvf;
1644 	u8 cgx_id, lmac_id;
1645 	int rc = 0;
1646 	u32 seq_id;
1647 
1648 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1649 	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
1650 
1651 	rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
1652 					   pfvf->rx_chan_base, 0, &seq_id,
1653 					   true, -1, req->hdr.pcifunc);
1654 
1655 	if (!rc) {
1656 		rsp->index = seq_id;
1657 		dev_dbg(rvu->dev, "%s MAC (%pM) add to PF=%d success (seq_id=%u)\n",
1658 			__func__, req->mac_addr, pf, seq_id);
1659 		return 0;
1660 	}
1661 
1662 	dev_err(rvu->dev, "%s MAC (%pM) add to PF=%d failed\n", __func__,
1663 		req->mac_addr, pf);
1664 	return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
1665 }
1666 
1667 /**
1668  *	rvu_npc_exact_mac_addr_del - Delete DMAC filter
1669  *      @rvu: resource virtualization unit.
1670  *	@req: Delete request.
1671  *	@rsp: Delete response.
1672  *	Return: 0 upon success
1673  */
1674 int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
1675 			       struct cgx_mac_addr_del_req *req,
1676 			       struct msg_rsp *rsp)
1677 {
1678 	int pf = rvu_get_pf(req->hdr.pcifunc);
1679 	int rc;
1680 
1681 	rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
1682 	if (!rc) {
1683 		dev_dbg(rvu->dev, "%s del to PF=%d success (seq_id=%u)\n",
1684 			__func__, pf, req->index);
1685 		return 0;
1686 	}
1687 
1688 	dev_err(rvu->dev, "%s del to PF=%d failed (seq_id=%u)\n",
1689 		__func__,  pf, req->index);
1690 	return LMAC_AF_ERR_EXACT_MATCH_TBL_DEL_FAILED;
1691 }
1692 
1693 /**
1694  *	rvu_npc_exact_mac_addr_set - Add PF mac address to dmac filter.
1695  *      @rvu: resource virtualization unit.
1696  *	@req: Set request.
1697  *	@rsp: Set response.
1698  *	Return: 0 upon success
1699  */
1700 int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req,
1701 			       struct cgx_mac_addr_set_or_get *rsp)
1702 {
1703 	int pf = rvu_get_pf(req->hdr.pcifunc);
1704 	u32 seq_id = req->index;
1705 	struct rvu_pfvf *pfvf;
1706 	u8 cgx_id, lmac_id;
1707 	u32 mcam_idx = -1;
1708 	int rc, nixlf;
1709 
1710 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1711 
1712 	pfvf = &rvu->pf[pf];
1713 
	/* If the table does not have an entry, both the update entry and the
	 * delete table entry APIs below fail. Those are not failure conditions.
	 */
1717 	rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, pfvf->mac_addr,
1718 					      req->mac_addr, &seq_id);
1719 	if (!rc) {
1720 		rsp->index = seq_id;
1721 		ether_addr_copy(pfvf->mac_addr, req->mac_addr);
1722 		ether_addr_copy(rsp->mac_addr, req->mac_addr);
1723 		dev_dbg(rvu->dev, "%s MAC (%pM) update to PF=%d success\n",
1724 			__func__, req->mac_addr, pf);
1725 		return 0;
1726 	}
1727 
1728 	/* Try deleting and adding it again */
1729 	rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
1730 	if (rc) {
1731 		dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n",
1732 			__func__, pfvf->mac_addr, pf);
1733 	}
1734 
1735 	/* find mcam entry if exist */
1736 	rc = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, NULL);
1737 	if (!rc) {
1738 		mcam_idx = npc_get_nixlf_mcam_index(&rvu->hw->mcam, req->hdr.pcifunc,
1739 						    nixlf, NIXLF_UCAST_ENTRY);
1740 	}
1741 
1742 	rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
1743 					   pfvf->rx_chan_base, 0, &seq_id,
1744 					   true, mcam_idx, req->hdr.pcifunc);
1745 	if (rc) {
1746 		dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n",
1747 			__func__, req->mac_addr, pf);
1748 		return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
1749 	}
1750 
1751 	rsp->index = seq_id;
1752 	ether_addr_copy(rsp->mac_addr, req->mac_addr);
1753 	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
1754 	dev_dbg(rvu->dev,
1755 		"%s MAC (%pM) del and add to PF=%d success (seq_id=%u)\n",
1756 		__func__, req->mac_addr, pf, seq_id);
1757 	return 0;
1758 }
1759 
1760 /**
1761  *	rvu_npc_exact_can_disable_feature - Check if feature can be disabled.
1762  *      @rvu: resource virtualization unit.
1763  *	Return: True if exact match feature is supported.
1764  */
1765 bool rvu_npc_exact_can_disable_feature(struct rvu *rvu)
1766 {
1767 	struct npc_exact_table *table = rvu->hw->table;
1768 	bool empty;
1769 
1770 	if (!rvu->hw->cap.npc_exact_match_enabled)
1771 		return false;
1772 
1773 	mutex_lock(&table->lock);
1774 	empty = list_empty(&table->lhead_gbl);
1775 	mutex_unlock(&table->lock);
1776 
1777 	return empty;
1778 }
1779 
1780 /**
1781  *	rvu_npc_exact_disable_feature - Disable feature.
1782  *      @rvu: resource virtualization unit.
1783  */
1784 void rvu_npc_exact_disable_feature(struct rvu *rvu)
1785 {
1786 	rvu->hw->cap.npc_exact_match_enabled = false;
1787 }
1788 
1789 /**
1790  *	rvu_npc_exact_reset - Delete and free all entry which match pcifunc.
1791  *      @rvu: resource virtualization unit.
1792  *	@pcifunc: PCI func to match.
1793  */
1794 void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc)
1795 {
1796 	struct npc_exact_table *table = rvu->hw->table;
1797 	struct npc_exact_table_entry *tmp, *iter;
1798 	u32 seq_id;
1799 
1800 	mutex_lock(&table->lock);
1801 	list_for_each_entry_safe(iter, tmp, &table->lhead_gbl, glist) {
1802 		if (pcifunc != iter->pcifunc)
1803 			continue;
1804 
1805 		seq_id = iter->seq_id;
		dev_dbg(rvu->dev, "%s: resetting pcifunc=%d seq_id=%u\n", __func__,
1807 			pcifunc, seq_id);
1808 
1809 		mutex_unlock(&table->lock);
1810 		rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
1811 		mutex_lock(&table->lock);
1812 	}
1813 	mutex_unlock(&table->lock);
1814 }
1815 
1816 /**
1817  *      rvu_npc_exact_init - initialize exact match table
1818  *      @rvu: resource virtualization unit.
1819  *
1820  *	Initialize HW and SW resources to manage 4way-2K table and fully
1821  *	associative 32-entry mcam table.
1822  *	Return: 0 upon success.
1823  */
1824 int rvu_npc_exact_init(struct rvu *rvu)
1825 {
1826 	u64 bcast_mcast_val, bcast_mcast_mask;
1827 	struct npc_exact_table *table;
1828 	u64 exact_val, exact_mask;
1829 	u64 chan_val, chan_mask;
1830 	u8 cgx_id, lmac_id;
1831 	u32 *drop_mcam_idx;
1832 	u16 max_lmac_cnt;
1833 	u64 npc_const3;
1834 	int table_size;
1835 	int blkaddr;
1836 	u16 pcifunc;
1837 	int err, i;
1838 	u64 cfg;
1839 	bool rc;
1840 
	/* Read NPC_AF_CONST3 and check whether the exact
	 * match functionality is present
	 */
1844 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
1845 	if (blkaddr < 0) {
1846 		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
1847 		return -EINVAL;
1848 	}
1849 
1850 	/* Check exact match feature is supported */
1851 	npc_const3 = rvu_read64(rvu, blkaddr, NPC_AF_CONST3);
1852 	if (!(npc_const3 & BIT_ULL(62)))
1853 		return 0;
1854 
1855 	/* Check if kex profile has enabled EXACT match nibble */
1856 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
1857 	if (!(cfg & NPC_EXACT_NIBBLE_HIT))
1858 		return 0;
1859 
1860 	/* Set capability to true */
1861 	rvu->hw->cap.npc_exact_match_enabled = true;
1862 
1863 	table = kzalloc(sizeof(*table), GFP_KERNEL);
1864 	if (!table)
1865 		return -ENOMEM;
1866 
1867 	dev_dbg(rvu->dev, "%s: Memory allocation for table success\n", __func__);
1868 	rvu->hw->table = table;
1869 
1870 	/* Read table size, ways and depth */
1871 	table->mem_table.depth = FIELD_GET(GENMASK_ULL(31, 24), npc_const3);
1872 	table->mem_table.ways = FIELD_GET(GENMASK_ULL(19, 16), npc_const3);
1873 	table->cam_table.depth = FIELD_GET(GENMASK_ULL(15, 0), npc_const3);
1874 
	dev_dbg(rvu->dev, "%s: NPC exact match 4way_2k table(ways=%d, depth=%d)\n",
		__func__, table->mem_table.ways, table->mem_table.depth);
1877 
	/* Check if the depth of the table is a power of 2
	 * TODO: why is __builtin_popcount() not working ?
	 */
1881 	if ((table->mem_table.depth & (table->mem_table.depth - 1)) != 0) {
1882 		dev_err(rvu->dev,
			"%s: NPC exact match 4way_2k table depth(%d) is not a power of 2\n",
1884 			__func__,  table->mem_table.depth);
1885 		return -EINVAL;
1886 	}
1887 
1888 	table_size = table->mem_table.depth * table->mem_table.ways;
1889 
1890 	/* Allocate bitmap for 4way 2K table */
1891 	table->mem_table.bmap = devm_bitmap_zalloc(rvu->dev, table_size,
1892 						   GFP_KERNEL);
1893 	if (!table->mem_table.bmap)
1894 		return -ENOMEM;
1895 
1896 	dev_dbg(rvu->dev, "%s: Allocated bitmap for 4way 2K entry table\n", __func__);
1897 
	/* Allocate bitmap for the fully associative CAM table */
	table->cam_table.bmap = devm_bitmap_zalloc(rvu->dev, table->cam_table.depth,
						   GFP_KERNEL);
1900 
1901 	if (!table->cam_table.bmap)
1902 		return -ENOMEM;
1903 
	dev_dbg(rvu->dev, "%s: Allocated bitmap for the CAM table\n", __func__);
1905 
1906 	table->tot_ids = table_size + table->cam_table.depth;
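	/* The id bitmap is sized for the worst case of every mem table and CAM
	 * table slot being in use; sequence ids are handed out first-free and
	 * serve as stable handles for entries, independent of table position.
	 */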
1907 	table->id_bmap = devm_bitmap_zalloc(rvu->dev, table->tot_ids,
1908 					    GFP_KERNEL);
1909 
1910 	if (!table->id_bmap)
1911 		return -ENOMEM;
1912 
1913 	dev_dbg(rvu->dev, "%s: Allocated bitmap for id map (total=%d)\n",
1914 		__func__, table->tot_ids);
1915 
1916 	/* Initialize list heads for npc_exact_table entries.
1917 	 * This entry is used by debugfs to show entries in
1918 	 * exact match table.
1919 	 */
1920 	for (i = 0; i < NPC_EXACT_TBL_MAX_WAYS; i++)
1921 		INIT_LIST_HEAD(&table->lhead_mem_tbl_entry[i]);
1922 
1923 	INIT_LIST_HEAD(&table->lhead_cam_tbl_entry);
1924 	INIT_LIST_HEAD(&table->lhead_gbl);
1925 
1926 	mutex_init(&table->lock);
1927 
1928 	rvu_exact_config_secret_key(rvu);
1929 	rvu_exact_config_search_key(rvu);
1930 
1931 	rvu_exact_config_table_mask(rvu);
1932 	rvu_exact_config_result_ctrl(rvu, table->mem_table.depth);
1933 
	/* - No drop rules for LBK and SDP.
	 * - One drop rule for each CGX LMAC.
	 */
1937 	exact_val = !NPC_EXACT_RESULT_HIT;
1938 	exact_mask = NPC_EXACT_RESULT_HIT;
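	/* The drop rules match on an exact match table miss (HIT bit clear),
	 * so only traffic whose DMAC has no filter entry hits the drop rule.
	 */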
1939 
1940 	/* nibble - 3	2  1   0
1941 	 *	   L3B L3M L2B L2M
1942 	 */
1943 	bcast_mcast_val = 0b0000;
1944 	bcast_mcast_mask = 0b0011;
1945 
	/* Install a drop rule for each CGX LMAC */
1947 	drop_mcam_idx = &table->num_drop_rules;
1948 
1949 	max_lmac_cnt = rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx +
1950 		       PF_CGXMAP_BASE;
1951 
1952 	for (i = PF_CGXMAP_BASE; i < max_lmac_cnt; i++) {
1953 		if (rvu->pf2cgxlmac_map[i] == 0xFF)
1954 			continue;
1955 
1956 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[i], &cgx_id, &lmac_id);
1957 
1958 		rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, NIX_INTF_TYPE_CGX, cgx_id,
1959 								lmac_id, &chan_val, &chan_mask);
1960 		if (!rc) {
1961 			dev_err(rvu->dev,
1962 				"%s: failed, info chan_val=0x%llx chan_mask=0x%llx rule_id=%d\n",
1963 				__func__, chan_val, chan_mask, *drop_mcam_idx);
1964 			return -EINVAL;
1965 		}
1966 
1967 		/* Filter rules are only for PF */
1968 		pcifunc = RVU_PFFUNC(i, 0);
1969 
1970 		dev_dbg(rvu->dev,
			"%s: Drop rule cgx=%d lmac=%d chan(val=0x%llx, mask=0x%llx)\n",
1972 			__func__, cgx_id, lmac_id, chan_val, chan_mask);
1973 
1974 		rc = rvu_npc_exact_save_drop_rule_chan_and_mask(rvu, table->num_drop_rules,
1975 								chan_val, chan_mask, pcifunc);
1976 		if (!rc) {
1977 			dev_err(rvu->dev,
1978 				"%s: failed to set drop info for cgx=%d, lmac=%d, chan=%llx\n",
1979 				__func__, cgx_id, lmac_id, chan_val);
1980 			return -EINVAL;
1981 		}
1982 
1983 		err = npc_install_mcam_drop_rule(rvu, *drop_mcam_idx,
1984 						 &table->counter_idx[*drop_mcam_idx],
1985 						 chan_val, chan_mask,
1986 						 exact_val, exact_mask,
1987 						 bcast_mcast_val, bcast_mcast_mask);
1988 		if (err) {
1989 			dev_err(rvu->dev,
1990 				"failed to configure drop rule (cgx=%d lmac=%d)\n",
1991 				cgx_id, lmac_id);
1992 			return err;
1993 		}
1994 
1995 		(*drop_mcam_idx)++;
1996 	}
1997 
1998 	dev_info(rvu->dev, "initialized exact match table successfully\n");
1999 	return 0;
2000 }
2001