1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2019-2021, Intel Corporation. */
3 
4 #include "ice.h"
5 #include "ice_tc_lib.h"
6 #include "ice_fltr.h"
7 #include "ice_lib.h"
8 #include "ice_protocol_type.h"
9 
10 /**
11  * ice_tc_count_lkups - determine lookup count for switch filter
12  * @flags: TC-flower flags
13  * @headers: Pointer to TC flower filter header structure
14  * @fltr: Pointer to outer TC filter structure
15  *
16  * Determine lookup count based on TC flower input for switch filter.
17  */
18 static int
19 ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
20 		   struct ice_tc_flower_fltr *fltr)
21 {
22 	int lkups_cnt = 0;
23 
24 	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
25 		lkups_cnt++;
26 
27 	if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)
28 		lkups_cnt++;
29 
30 	if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS)
31 		lkups_cnt++;
32 
33 	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
34 		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
35 		     ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
36 		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV6))
37 		lkups_cnt++;
38 
39 	if (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
40 		     ICE_TC_FLWR_FIELD_ENC_IP_TTL))
41 		lkups_cnt++;
42 
43 	if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT)
44 		lkups_cnt++;
45 
46 	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID)
47 		lkups_cnt++;
48 
49 	/* are MAC fields specified? */
50 	if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | ICE_TC_FLWR_FIELD_SRC_MAC))
51 		lkups_cnt++;
52 
53 	/* is VLAN specified? */
54 	if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO))
55 		lkups_cnt++;
56 
57 	/* is CVLAN specified? */
58 	if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO))
59 		lkups_cnt++;
60 
61 	/* are PPPoE options specified? */
62 	if (flags & (ICE_TC_FLWR_FIELD_PPPOE_SESSID |
63 		     ICE_TC_FLWR_FIELD_PPP_PROTO))
64 		lkups_cnt++;
65 
66 	/* are IPv[4|6] fields specified? */
67 	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4 |
68 		     ICE_TC_FLWR_FIELD_DEST_IPV6 | ICE_TC_FLWR_FIELD_SRC_IPV6))
69 		lkups_cnt++;
70 
71 	if (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))
72 		lkups_cnt++;
73 
74 	/* are L2TPv3 options specified? */
75 	if (flags & ICE_TC_FLWR_FIELD_L2TPV3_SESSID)
76 		lkups_cnt++;
77 
78 	/* is L4 (TCP/UDP/any other L4 protocol fields) specified? */
79 	if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
80 		     ICE_TC_FLWR_FIELD_SRC_L4_PORT))
81 		lkups_cnt++;
82 
83 	return lkups_cnt;
84 }
85 
86 static enum ice_protocol_type ice_proto_type_from_mac(bool inner)
87 {
88 	return inner ? ICE_MAC_IL : ICE_MAC_OFOS;
89 }
90 
91 static enum ice_protocol_type ice_proto_type_from_etype(bool inner)
92 {
93 	return inner ? ICE_ETYPE_IL : ICE_ETYPE_OL;
94 }
95 
96 static enum ice_protocol_type ice_proto_type_from_ipv4(bool inner)
97 {
98 	return inner ? ICE_IPV4_IL : ICE_IPV4_OFOS;
99 }
100 
101 static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner)
102 {
103 	return inner ? ICE_IPV6_IL : ICE_IPV6_OFOS;
104 }
105 
106 static enum ice_protocol_type ice_proto_type_from_l4_port(u16 ip_proto)
107 {
108 	switch (ip_proto) {
109 	case IPPROTO_TCP:
110 		return ICE_TCP_IL;
111 	case IPPROTO_UDP:
112 		return ICE_UDP_ILOS;
113 	}
114 
115 	return 0;
116 }
117 
118 static enum ice_protocol_type
119 ice_proto_type_from_tunnel(enum ice_tunnel_type type)
120 {
121 	switch (type) {
122 	case TNL_VXLAN:
123 		return ICE_VXLAN;
124 	case TNL_GENEVE:
125 		return ICE_GENEVE;
126 	case TNL_GRETAP:
127 		return ICE_NVGRE;
128 	case TNL_GTPU:
129 		/* NO_PAY profiles will not work with GTP-U */
130 		return ICE_GTP;
131 	case TNL_GTPC:
132 		return ICE_GTP_NO_PAY;
133 	default:
134 		return 0;
135 	}
136 }
137 
138 static enum ice_sw_tunnel_type
139 ice_sw_type_from_tunnel(enum ice_tunnel_type type)
140 {
141 	switch (type) {
142 	case TNL_VXLAN:
143 		return ICE_SW_TUN_VXLAN;
144 	case TNL_GENEVE:
145 		return ICE_SW_TUN_GENEVE;
146 	case TNL_GRETAP:
147 		return ICE_SW_TUN_NVGRE;
148 	case TNL_GTPU:
149 		return ICE_SW_TUN_GTPU;
150 	case TNL_GTPC:
151 		return ICE_SW_TUN_GTPC;
152 	default:
153 		return ICE_NON_TUN;
154 	}
155 }
156 
157 static u16 ice_check_supported_vlan_tpid(u16 vlan_tpid)
158 {
159 	switch (vlan_tpid) {
160 	case ETH_P_8021Q:
161 	case ETH_P_8021AD:
162 	case ETH_P_QINQ1:
163 		return vlan_tpid;
164 	default:
165 		return 0;
166 	}
167 }
168 
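/**
 * ice_tc_fill_tunnel_outer - fill lookups for the outer (tunnel) headers
 * @flags: TC-flower flags
 * @fltr: Pointer to TC flower filter
 * @list: list of advanced lookup elements to fill
 *
 * Fill the tunnel ID (VNI/TNI/TEID), GTP options, outer MAC, outer IPv4/IPv6,
 * outer ToS/TTL and tunnel UDP destination port lookups.
 *
 * Return: number of lookup elements filled
 */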
169 static int
170 ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
171 			 struct ice_adv_lkup_elem *list)
172 {
173 	struct ice_tc_flower_lyr_2_4_hdrs *hdr = &fltr->outer_headers;
174 	int i = 0;
175 
176 	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) {
177 		u32 tenant_id;
178 
179 		list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
180 		switch (fltr->tunnel_type) {
181 		case TNL_VXLAN:
182 		case TNL_GENEVE:
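			/* the 24-bit VNI occupies the upper three bytes of
			 * the 32-bit field, hence the shift by 8 and the
			 * 0xffffff00 mask below
			 */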
183 			tenant_id = be32_to_cpu(fltr->tenant_id) << 8;
184 			list[i].h_u.tnl_hdr.vni = cpu_to_be32(tenant_id);
185 			memcpy(&list[i].m_u.tnl_hdr.vni, "\xff\xff\xff\x00", 4);
186 			i++;
187 			break;
188 		case TNL_GRETAP:
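			/* the 32-bit GRE key (TNI + flow ID) is matched in
			 * full
			 */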
189 			list[i].h_u.nvgre_hdr.tni_flow = fltr->tenant_id;
190 			memcpy(&list[i].m_u.nvgre_hdr.tni_flow,
191 			       "\xff\xff\xff\xff", 4);
192 			i++;
193 			break;
194 		case TNL_GTPC:
195 		case TNL_GTPU:
196 			list[i].h_u.gtp_hdr.teid = fltr->tenant_id;
197 			memcpy(&list[i].m_u.gtp_hdr.teid,
198 			       "\xff\xff\xff\xff", 4);
199 			i++;
200 			break;
201 		default:
202 			break;
203 		}
204 	}
205 
206 	if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC) {
207 		list[i].type = ice_proto_type_from_mac(false);
208 		ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
209 				hdr->l2_key.dst_mac);
210 		ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
211 				hdr->l2_mask.dst_mac);
212 		i++;
213 	}
214 
215 	if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS &&
216 	    (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
217 		list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
218 
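		/* the PDU type is carried in the high nibble of its byte
		 * (mask 0xf0) and the QFI in the low six bits of its byte
		 * (mask 0x3f)
		 */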
219 		if (fltr->gtp_pdu_info_masks.pdu_type) {
220 			list[i].h_u.gtp_hdr.pdu_type =
221 				fltr->gtp_pdu_info_keys.pdu_type << 4;
222 			memcpy(&list[i].m_u.gtp_hdr.pdu_type, "\xf0", 1);
223 		}
224 
225 		if (fltr->gtp_pdu_info_masks.qfi) {
226 			list[i].h_u.gtp_hdr.qfi = fltr->gtp_pdu_info_keys.qfi;
227 			memcpy(&list[i].m_u.gtp_hdr.qfi, "\x3f", 1);
228 		}
229 
230 		i++;
231 	}
232 
233 	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
234 		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
235 		list[i].type = ice_proto_type_from_ipv4(false);
236 
237 		if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV4) {
238 			list[i].h_u.ipv4_hdr.src_addr = hdr->l3_key.src_ipv4;
239 			list[i].m_u.ipv4_hdr.src_addr = hdr->l3_mask.src_ipv4;
240 		}
241 		if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV4) {
242 			list[i].h_u.ipv4_hdr.dst_addr = hdr->l3_key.dst_ipv4;
243 			list[i].m_u.ipv4_hdr.dst_addr = hdr->l3_mask.dst_ipv4;
244 		}
245 		i++;
246 	}
247 
248 	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
249 		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV6)) {
250 		list[i].type = ice_proto_type_from_ipv6(false);
251 
252 		if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV6) {
253 			memcpy(&list[i].h_u.ipv6_hdr.src_addr,
254 			       &hdr->l3_key.src_ipv6_addr,
255 			       sizeof(hdr->l3_key.src_ipv6_addr));
256 			memcpy(&list[i].m_u.ipv6_hdr.src_addr,
257 			       &hdr->l3_mask.src_ipv6_addr,
258 			       sizeof(hdr->l3_mask.src_ipv6_addr));
259 		}
260 		if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV6) {
261 			memcpy(&list[i].h_u.ipv6_hdr.dst_addr,
262 			       &hdr->l3_key.dst_ipv6_addr,
263 			       sizeof(hdr->l3_key.dst_ipv6_addr));
264 			memcpy(&list[i].m_u.ipv6_hdr.dst_addr,
265 			       &hdr->l3_mask.dst_ipv6_addr,
266 			       sizeof(hdr->l3_mask.dst_ipv6_addr));
267 		}
268 		i++;
269 	}
270 
271 	if (fltr->inner_headers.l2_key.n_proto == htons(ETH_P_IP) &&
272 	    (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
273 		      ICE_TC_FLWR_FIELD_ENC_IP_TTL))) {
274 		list[i].type = ice_proto_type_from_ipv4(false);
275 
276 		if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TOS) {
277 			list[i].h_u.ipv4_hdr.tos = hdr->l3_key.tos;
278 			list[i].m_u.ipv4_hdr.tos = hdr->l3_mask.tos;
279 		}
280 
281 		if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TTL) {
282 			list[i].h_u.ipv4_hdr.time_to_live = hdr->l3_key.ttl;
283 			list[i].m_u.ipv4_hdr.time_to_live = hdr->l3_mask.ttl;
284 		}
285 
286 		i++;
287 	}
288 
289 	if (fltr->inner_headers.l2_key.n_proto == htons(ETH_P_IPV6) &&
290 	    (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
291 		      ICE_TC_FLWR_FIELD_ENC_IP_TTL))) {
292 		struct ice_ipv6_hdr *hdr_h, *hdr_m;
293 
294 		hdr_h = &list[i].h_u.ipv6_hdr;
295 		hdr_m = &list[i].m_u.ipv6_hdr;
296 		list[i].type = ice_proto_type_from_ipv6(false);
297 
298 		if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TOS) {
299 			be32p_replace_bits(&hdr_h->be_ver_tc_flow,
300 					   hdr->l3_key.tos,
301 					   ICE_IPV6_HDR_TC_MASK);
302 			be32p_replace_bits(&hdr_m->be_ver_tc_flow,
303 					   hdr->l3_mask.tos,
304 					   ICE_IPV6_HDR_TC_MASK);
305 		}
306 
307 		if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TTL) {
308 			hdr_h->hop_limit = hdr->l3_key.ttl;
309 			hdr_m->hop_limit = hdr->l3_mask.ttl;
310 		}
311 
312 		i++;
313 	}
314 
315 	if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) &&
316 	    hdr->l3_key.ip_proto == IPPROTO_UDP) {
317 		list[i].type = ICE_UDP_OF;
318 		list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port;
319 		list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port;
320 		i++;
321 	}
322 
323 	return i;
324 }
325 
326 /**
327  * ice_tc_fill_rules - fill filter rules based on TC fltr
328  * @hw: pointer to HW structure
329  * @flags: tc flower field flags
330  * @tc_fltr: pointer to TC flower filter
331  * @list: list of advance rule elements
332  * @rule_info: pointer to information about rule
333  * @l4_proto: pointer to information such as L4 proto type
334  *
335  * Fill ice_adv_lkup_elem list based on TC flower flags and
336  * TC flower headers. This list should be used to add
337  * advance filter in hardware.
338  */
339 static int
340 ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
341 		  struct ice_tc_flower_fltr *tc_fltr,
342 		  struct ice_adv_lkup_elem *list,
343 		  struct ice_adv_rule_info *rule_info,
344 		  u16 *l4_proto)
345 {
346 	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
347 	bool inner = false;
348 	u16 vlan_tpid = 0;
349 	int i = 0;
350 
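	/* start with TPID 0 (no VLAN); it is overridden below if the filter
	 * matches a VLAN with a supported TPID
	 */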
351 	rule_info->vlan_type = vlan_tpid;
352 
353 	rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
354 	if (tc_fltr->tunnel_type != TNL_LAST) {
355 		i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list);
356 
357 		headers = &tc_fltr->inner_headers;
358 		inner = true;
359 	}
360 
361 	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
362 		list[i].type = ice_proto_type_from_etype(inner);
363 		list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto;
364 		list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto;
365 		i++;
366 	}
367 
368 	if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
369 		     ICE_TC_FLWR_FIELD_SRC_MAC)) {
370 		struct ice_tc_l2_hdr *l2_key, *l2_mask;
371 
372 		l2_key = &headers->l2_key;
373 		l2_mask = &headers->l2_mask;
374 
375 		list[i].type = ice_proto_type_from_mac(inner);
376 		if (flags & ICE_TC_FLWR_FIELD_DST_MAC) {
377 			ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
378 					l2_key->dst_mac);
379 			ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
380 					l2_mask->dst_mac);
381 		}
382 		if (flags & ICE_TC_FLWR_FIELD_SRC_MAC) {
383 			ether_addr_copy(list[i].h_u.eth_hdr.src_addr,
384 					l2_key->src_mac);
385 			ether_addr_copy(list[i].m_u.eth_hdr.src_addr,
386 					l2_mask->src_mac);
387 		}
388 		i++;
389 	}
390 
391 	/* copy VLAN info */
392 	if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO)) {
393 		vlan_tpid = be16_to_cpu(headers->vlan_hdr.vlan_tpid);
394 		rule_info->vlan_type =
395 				ice_check_supported_vlan_tpid(vlan_tpid);
396 
397 		if (flags & ICE_TC_FLWR_FIELD_CVLAN)
398 			list[i].type = ICE_VLAN_EX;
399 		else
400 			list[i].type = ICE_VLAN_OFOS;
401 
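		/* VLAN TCI layout: PCP (3 bits) | DEI (1 bit) | VID (12 bits);
		 * 0x0FFF matches only the VID, 0xE000 only the PCP, and 0xEFFF
		 * both while leaving the DEI bit unmasked
		 */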
402 		if (flags & ICE_TC_FLWR_FIELD_VLAN) {
403 			list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
404 			list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0x0FFF);
405 		}
406 
407 		if (flags & ICE_TC_FLWR_FIELD_VLAN_PRIO) {
408 			if (flags & ICE_TC_FLWR_FIELD_VLAN) {
409 				list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xEFFF);
410 			} else {
411 				list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xE000);
412 				list[i].h_u.vlan_hdr.vlan = 0;
413 			}
414 			list[i].h_u.vlan_hdr.vlan |=
415 				headers->vlan_hdr.vlan_prio;
416 		}
417 
418 		i++;
419 	}
420 
421 	if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO)) {
422 		list[i].type = ICE_VLAN_IN;
423 
424 		if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
425 			list[i].h_u.vlan_hdr.vlan = headers->cvlan_hdr.vlan_id;
426 			list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0x0FFF);
427 		}
428 
429 		if (flags & ICE_TC_FLWR_FIELD_CVLAN_PRIO) {
430 			if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
431 				list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xEFFF);
432 			} else {
433 				list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xE000);
434 				list[i].h_u.vlan_hdr.vlan = 0;
435 			}
436 			list[i].h_u.vlan_hdr.vlan |=
437 				headers->cvlan_hdr.vlan_prio;
438 		}
439 
440 		i++;
441 	}
442 
443 	if (flags & (ICE_TC_FLWR_FIELD_PPPOE_SESSID |
444 		     ICE_TC_FLWR_FIELD_PPP_PROTO)) {
445 		struct ice_pppoe_hdr *vals, *masks;
446 
447 		vals = &list[i].h_u.pppoe_hdr;
448 		masks = &list[i].m_u.pppoe_hdr;
449 
450 		list[i].type = ICE_PPPOE;
451 
452 		if (flags & ICE_TC_FLWR_FIELD_PPPOE_SESSID) {
453 			vals->session_id = headers->pppoe_hdr.session_id;
454 			masks->session_id = cpu_to_be16(0xFFFF);
455 		}
456 
457 		if (flags & ICE_TC_FLWR_FIELD_PPP_PROTO) {
458 			vals->ppp_prot_id = headers->pppoe_hdr.ppp_proto;
459 			masks->ppp_prot_id = cpu_to_be16(0xFFFF);
460 		}
461 
462 		i++;
463 	}
464 
465 	/* copy L3 (IPv[4|6]: src, dest) address */
466 	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 |
467 		     ICE_TC_FLWR_FIELD_SRC_IPV4)) {
468 		struct ice_tc_l3_hdr *l3_key, *l3_mask;
469 
470 		list[i].type = ice_proto_type_from_ipv4(inner);
471 		l3_key = &headers->l3_key;
472 		l3_mask = &headers->l3_mask;
473 		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV4) {
474 			list[i].h_u.ipv4_hdr.dst_addr = l3_key->dst_ipv4;
475 			list[i].m_u.ipv4_hdr.dst_addr = l3_mask->dst_ipv4;
476 		}
477 		if (flags & ICE_TC_FLWR_FIELD_SRC_IPV4) {
478 			list[i].h_u.ipv4_hdr.src_addr = l3_key->src_ipv4;
479 			list[i].m_u.ipv4_hdr.src_addr = l3_mask->src_ipv4;
480 		}
481 		i++;
482 	} else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 |
483 			    ICE_TC_FLWR_FIELD_SRC_IPV6)) {
484 		struct ice_ipv6_hdr *ipv6_hdr, *ipv6_mask;
485 		struct ice_tc_l3_hdr *l3_key, *l3_mask;
486 
487 		list[i].type = ice_proto_type_from_ipv6(inner);
488 		ipv6_hdr = &list[i].h_u.ipv6_hdr;
489 		ipv6_mask = &list[i].m_u.ipv6_hdr;
490 		l3_key = &headers->l3_key;
491 		l3_mask = &headers->l3_mask;
492 
493 		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV6) {
494 			memcpy(&ipv6_hdr->dst_addr, &l3_key->dst_ipv6_addr,
495 			       sizeof(l3_key->dst_ipv6_addr));
496 			memcpy(&ipv6_mask->dst_addr, &l3_mask->dst_ipv6_addr,
497 			       sizeof(l3_mask->dst_ipv6_addr));
498 		}
499 		if (flags & ICE_TC_FLWR_FIELD_SRC_IPV6) {
500 			memcpy(&ipv6_hdr->src_addr, &l3_key->src_ipv6_addr,
501 			       sizeof(l3_key->src_ipv6_addr));
502 			memcpy(&ipv6_mask->src_addr, &l3_mask->src_ipv6_addr,
503 			       sizeof(l3_mask->src_ipv6_addr));
504 		}
505 		i++;
506 	}
507 
508 	if (headers->l2_key.n_proto == htons(ETH_P_IP) &&
509 	    (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))) {
510 		list[i].type = ice_proto_type_from_ipv4(inner);
511 
512 		if (flags & ICE_TC_FLWR_FIELD_IP_TOS) {
513 			list[i].h_u.ipv4_hdr.tos = headers->l3_key.tos;
514 			list[i].m_u.ipv4_hdr.tos = headers->l3_mask.tos;
515 		}
516 
517 		if (flags & ICE_TC_FLWR_FIELD_IP_TTL) {
518 			list[i].h_u.ipv4_hdr.time_to_live =
519 				headers->l3_key.ttl;
520 			list[i].m_u.ipv4_hdr.time_to_live =
521 				headers->l3_mask.ttl;
522 		}
523 
524 		i++;
525 	}
526 
527 	if (headers->l2_key.n_proto == htons(ETH_P_IPV6) &&
528 	    (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))) {
529 		struct ice_ipv6_hdr *hdr_h, *hdr_m;
530 
531 		hdr_h = &list[i].h_u.ipv6_hdr;
532 		hdr_m = &list[i].m_u.ipv6_hdr;
533 		list[i].type = ice_proto_type_from_ipv6(inner);
534 
535 		if (flags & ICE_TC_FLWR_FIELD_IP_TOS) {
536 			be32p_replace_bits(&hdr_h->be_ver_tc_flow,
537 					   headers->l3_key.tos,
538 					   ICE_IPV6_HDR_TC_MASK);
539 			be32p_replace_bits(&hdr_m->be_ver_tc_flow,
540 					   headers->l3_mask.tos,
541 					   ICE_IPV6_HDR_TC_MASK);
542 		}
543 
544 		if (flags & ICE_TC_FLWR_FIELD_IP_TTL) {
545 			hdr_h->hop_limit = headers->l3_key.ttl;
546 			hdr_m->hop_limit = headers->l3_mask.ttl;
547 		}
548 
549 		i++;
550 	}
551 
552 	if (flags & ICE_TC_FLWR_FIELD_L2TPV3_SESSID) {
553 		list[i].type = ICE_L2TPV3;
554 
555 		list[i].h_u.l2tpv3_sess_hdr.session_id =
556 			headers->l2tpv3_hdr.session_id;
557 		list[i].m_u.l2tpv3_sess_hdr.session_id =
558 			cpu_to_be32(0xFFFFFFFF);
559 
560 		i++;
561 	}
562 
563 	/* copy L4 (src, dest) port */
564 	if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
565 		     ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
566 		struct ice_tc_l4_hdr *l4_key, *l4_mask;
567 
568 		list[i].type = ice_proto_type_from_l4_port(headers->l3_key.ip_proto);
569 		l4_key = &headers->l4_key;
570 		l4_mask = &headers->l4_mask;
571 
572 		if (flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) {
573 			list[i].h_u.l4_hdr.dst_port = l4_key->dst_port;
574 			list[i].m_u.l4_hdr.dst_port = l4_mask->dst_port;
575 		}
576 		if (flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT) {
577 			list[i].h_u.l4_hdr.src_port = l4_key->src_port;
578 			list[i].m_u.l4_hdr.src_port = l4_mask->src_port;
579 		}
580 		i++;
581 	}
582 
583 	return i;
584 }
585 
586 /**
587  * ice_tc_tun_get_type - get the tunnel type
588  * @tunnel_dev: ptr to tunnel device
589  *
590  * This function detects the appropriate tunnel_type if the specified device is
591  * a tunnel device such as VXLAN/Geneve
592  */
593 static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
594 {
595 	if (netif_is_vxlan(tunnel_dev))
596 		return TNL_VXLAN;
597 	if (netif_is_geneve(tunnel_dev))
598 		return TNL_GENEVE;
599 	if (netif_is_gretap(tunnel_dev) ||
600 	    netif_is_ip6gretap(tunnel_dev))
601 		return TNL_GRETAP;
602 
603 	/* Assume GTP-U by default in case of GTP netdev.
604 	 * GTP-C may be selected later, based on enc_dst_port.
605 	 */
606 	if (netif_is_gtp(tunnel_dev))
607 		return TNL_GTPU;
608 	return TNL_LAST;
609 }
610 
611 bool ice_is_tunnel_supported(struct net_device *dev)
612 {
613 	return ice_tc_tun_get_type(dev) != TNL_LAST;
614 }
615 
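/**
 * ice_eswitch_tc_parse_action - parse a TC action in switchdev mode
 * @fltr: Pointer to TC flower filter
 * @act: Pointer to flow action entry
 *
 * Only drop and redirect actions are supported; a redirect sets the filter
 * direction and, for port representors, the destination VSI.
 *
 * Return: 0 on success, -EINVAL for unsupported actions or netdevices
 */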
616 static int
617 ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
618 			    struct flow_action_entry *act)
619 {
620 	struct ice_repr *repr;
621 
622 	switch (act->id) {
623 	case FLOW_ACTION_DROP:
624 		fltr->action.fltr_act = ICE_DROP_PACKET;
625 		break;
626 
627 	case FLOW_ACTION_REDIRECT:
628 		fltr->action.fltr_act = ICE_FWD_TO_VSI;
629 
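		/* a redirect to a port representor is treated as ingress
		 * (toward the representor's source VSI); a redirect to the
		 * uplink or to a tunnel device is treated as egress
		 */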
630 		if (ice_is_port_repr_netdev(act->dev)) {
631 			repr = ice_netdev_to_repr(act->dev);
632 
633 			fltr->dest_vsi = repr->src_vsi;
634 			fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
635 		} else if (netif_is_ice(act->dev) ||
636 			   ice_is_tunnel_supported(act->dev)) {
637 			fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
638 		} else {
639 			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
640 			return -EINVAL;
641 		}
642 
643 		break;
644 
645 	default:
646 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action in switchdev mode");
647 		return -EINVAL;
648 	}
649 
650 	return 0;
651 }
652 
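/**
 * ice_eswitch_add_tc_fltr - add a TC flower filter in switchdev mode
 * @vsi: Pointer to VSI
 * @fltr: Pointer to TC flower filter
 *
 * Build the lookup list from the filter fields and add the resulting advanced
 * switch rule, forwarding to the destination VSI or dropping based on the
 * parsed action. The rule IDs are stored for later removal.
 *
 * Return: 0 on success or a negative error code
 */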
653 static int
654 ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
655 {
656 	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
657 	struct ice_adv_rule_info rule_info = { 0 };
658 	struct ice_rule_query_data rule_added;
659 	struct ice_hw *hw = &vsi->back->hw;
660 	struct ice_adv_lkup_elem *list;
661 	u32 flags = fltr->flags;
662 	int lkups_cnt;
663 	int ret;
664 	int i;
665 
666 	if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
667 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
668 		return -EOPNOTSUPP;
669 	}
670 
671 	lkups_cnt = ice_tc_count_lkups(flags, headers, fltr);
672 	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
673 	if (!list)
674 		return -ENOMEM;
675 
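	/* the number of lookups filled must match the count computed above;
	 * a mismatch means ice_tc_count_lkups() and ice_tc_fill_rules()
	 * disagree on how the flags are handled
	 */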
676 	i = ice_tc_fill_rules(hw, flags, fltr, list, &rule_info, NULL);
677 	if (i != lkups_cnt) {
678 		ret = -EINVAL;
679 		goto exit;
680 	}
681 
682 	/* egress traffic is always redirected to the uplink */
683 	if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
684 		fltr->dest_vsi = vsi->back->switchdev.uplink_vsi;
685 
686 	rule_info.sw_act.fltr_act = fltr->action.fltr_act;
687 	if (fltr->action.fltr_act != ICE_DROP_PACKET)
688 		rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
689 	/* For now, set the priority to the highest value; it also becomes
690 	 * the priority of the recipe that gets created as a result of the
691 	 * new extraction sequence based on the input set.
692 	 * Priority '7' is the max value for a switch recipe; a higher number
693 	 * gives the rule higher precedence in switch rule evaluation.
694 	 */
695 	rule_info.priority = 7;
696 
697 	if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
698 		rule_info.sw_act.flag |= ICE_FLTR_RX;
699 		rule_info.sw_act.src = hw->pf_id;
700 		rule_info.rx = true;
701 	} else {
702 		rule_info.sw_act.flag |= ICE_FLTR_TX;
703 		rule_info.sw_act.src = vsi->idx;
704 		rule_info.rx = false;
705 		rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
706 		rule_info.flags_info.act_valid = true;
707 	}
708 
709 	/* specify the cookie as filter_rule_id */
710 	rule_info.fltr_rule_id = fltr->cookie;
711 
712 	ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
713 	if (ret == -EEXIST) {
714 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exists");
715 		ret = -EINVAL;
716 		goto exit;
717 	} else if (ret) {
718 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
719 		goto exit;
720 	}
721 
722 	/* store the output params, which are needed later for removing
723 	 * advanced switch filter
724 	 */
725 	fltr->rid = rule_added.rid;
726 	fltr->rule_id = rule_added.rule_id;
727 	fltr->dest_id = rule_added.vsi_handle;
728 
729 exit:
730 	kfree(list);
731 	return ret;
732 }
733 
734 /**
735  * ice_add_tc_flower_adv_fltr - add appropriate filter rules
736  * @vsi: Pointer to VSI
737  * @tc_fltr: Pointer to TC flower filter structure
738  *
739  * based on filter parameters, using advanced recipes supported
740  * by the OS package.
741  */
742 static int
743 ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
744 			   struct ice_tc_flower_fltr *tc_fltr)
745 {
746 	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
747 	struct ice_adv_rule_info rule_info = {0};
748 	struct ice_rule_query_data rule_added;
749 	struct ice_adv_lkup_elem *list;
750 	struct ice_pf *pf = vsi->back;
751 	struct ice_hw *hw = &pf->hw;
752 	u32 flags = tc_fltr->flags;
753 	struct ice_vsi *ch_vsi;
754 	struct device *dev;
755 	u16 lkups_cnt = 0;
756 	u16 l4_proto = 0;
757 	int ret = 0;
758 	u16 i = 0;
759 
760 	dev = ice_pf_to_dev(pf);
761 	if (ice_is_safe_mode(pf)) {
762 		NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because driver is in safe mode");
763 		return -EOPNOTSUPP;
764 	}
765 
766 	if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
767 				ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
768 				ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
769 				ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
770 				ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) {
771 		NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unsupported encap field(s)");
772 		return -EOPNOTSUPP;
773 	}
774 
775 	/* get the channel (aka ADQ VSI) */
776 	if (tc_fltr->dest_vsi)
777 		ch_vsi = tc_fltr->dest_vsi;
778 	else
779 		ch_vsi = vsi->tc_map_vsi[tc_fltr->action.tc_class];
780 
781 	lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
782 	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
783 	if (!list)
784 		return -ENOMEM;
785 
786 	i = ice_tc_fill_rules(hw, flags, tc_fltr, list, &rule_info, &l4_proto);
787 	if (i != lkups_cnt) {
788 		ret = -EINVAL;
789 		goto exit;
790 	}
791 
792 	rule_info.sw_act.fltr_act = tc_fltr->action.fltr_act;
793 	if (tc_fltr->action.tc_class >= ICE_CHNL_START_TC) {
794 		if (!ch_vsi) {
795 			NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because specified destination doesn't exist");
796 			ret = -EINVAL;
797 			goto exit;
798 		}
799 
800 		rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
801 		rule_info.sw_act.vsi_handle = ch_vsi->idx;
802 		rule_info.priority = 7;
803 		rule_info.sw_act.src = hw->pf_id;
804 		rule_info.rx = true;
805 		dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n",
806 			tc_fltr->action.tc_class,
807 			rule_info.sw_act.vsi_handle, lkups_cnt);
808 	} else {
809 		rule_info.sw_act.flag |= ICE_FLTR_TX;
810 		rule_info.sw_act.src = vsi->idx;
811 		rule_info.rx = false;
812 	}
813 
814 	/* specify the cookie as filter_rule_id */
815 	rule_info.fltr_rule_id = tc_fltr->cookie;
816 
817 	ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
818 	if (ret == -EEXIST) {
819 		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
820 				   "Unable to add filter because it already exists");
821 		ret = -EINVAL;
822 		goto exit;
823 	} else if (ret) {
824 		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
825 				   "Unable to add filter due to error");
826 		goto exit;
827 	}
828 
829 	/* store the output params, which are needed later for removing
830 	 * advanced switch filter
831 	 */
832 	tc_fltr->rid = rule_added.rid;
833 	tc_fltr->rule_id = rule_added.rule_id;
834 	if (tc_fltr->action.tc_class > 0 && ch_vsi) {
835 		/* For PF ADQ, VSI type is set as ICE_VSI_CHNL, and
836 		 * for PF ADQ filter, it is not yet set in tc_fltr,
837 		 * hence store the dest_vsi ptr in tc_fltr
838 		 */
839 		if (ch_vsi->type == ICE_VSI_CHNL)
840 			tc_fltr->dest_vsi = ch_vsi;
841 		/* keep track of advanced switch filter for
842 		 * destination VSI (channel VSI)
843 		 */
844 		ch_vsi->num_chnl_fltr++;
845 		/* in this case, dest_id is VSI handle (sw handle) */
846 		tc_fltr->dest_id = rule_added.vsi_handle;
847 
848 		/* keeps track of channel filters for PF VSI */
849 		if (vsi->type == ICE_VSI_PF &&
850 		    (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
851 			      ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
852 			pf->num_dmac_chnl_fltrs++;
853 	}
854 	dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x) for TC %u, rid %u, rule_id %u, vsi_idx %u\n",
855 		lkups_cnt, flags,
856 		tc_fltr->action.tc_class, rule_added.rid,
857 		rule_added.rule_id, rule_added.vsi_handle);
858 exit:
859 	kfree(list);
860 	return ret;
861 }
862 
863 /**
864  * ice_tc_set_pppoe - Parse PPPoE fields from TC flower filter
865  * @match: Pointer to flow match structure
866  * @fltr: Pointer to filter structure
867  * @headers: Pointer to outer header fields
868  * Return: PPP protocol used in filter (ppp_ses or ppp_disc)
869  */
870 static u16
871 ice_tc_set_pppoe(struct flow_match_pppoe *match,
872 		 struct ice_tc_flower_fltr *fltr,
873 		 struct ice_tc_flower_lyr_2_4_hdrs *headers)
874 {
875 	if (match->mask->session_id) {
876 		fltr->flags |= ICE_TC_FLWR_FIELD_PPPOE_SESSID;
877 		headers->pppoe_hdr.session_id = match->key->session_id;
878 	}
879 
880 	if (match->mask->ppp_proto) {
881 		fltr->flags |= ICE_TC_FLWR_FIELD_PPP_PROTO;
882 		headers->pppoe_hdr.ppp_proto = match->key->ppp_proto;
883 	}
884 
885 	return be16_to_cpu(match->key->type);
886 }
887 
888 /**
889  * ice_tc_set_ipv4 - Parse IPv4 addresses from TC flower filter
890  * @match: Pointer to flow match structure
891  * @fltr: Pointer to filter structure
892  * @headers: inner or outer header fields
893  * @is_encap: set true for tunnel IPv4 address
894  */
895 static int
896 ice_tc_set_ipv4(struct flow_match_ipv4_addrs *match,
897 		struct ice_tc_flower_fltr *fltr,
898 		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
899 {
900 	if (match->key->dst) {
901 		if (is_encap)
902 			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV4;
903 		else
904 			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV4;
905 		headers->l3_key.dst_ipv4 = match->key->dst;
906 		headers->l3_mask.dst_ipv4 = match->mask->dst;
907 	}
908 	if (match->key->src) {
909 		if (is_encap)
910 			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV4;
911 		else
912 			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV4;
913 		headers->l3_key.src_ipv4 = match->key->src;
914 		headers->l3_mask.src_ipv4 = match->mask->src;
915 	}
916 	return 0;
917 }
918 
919 /**
920  * ice_tc_set_ipv6 - Parse IPv6 addresses from TC flower filter
921  * @match: Pointer to flow match structure
922  * @fltr: Pointer to filter structure
923  * @headers: inner or outer header fields
924  * @is_encap: set true for tunnel IPv6 address
925  */
926 static int
927 ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match,
928 		struct ice_tc_flower_fltr *fltr,
929 		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
930 {
931 	struct ice_tc_l3_hdr *l3_key, *l3_mask;
932 
933 	/* src and dest IPv6 addresses should not be LOOPBACK
934 	 * (0:0:0:0:0:0:0:1), which can be represented as ::1
935 	 */
936 	if (ipv6_addr_loopback(&match->key->dst) ||
937 	    ipv6_addr_loopback(&match->key->src)) {
938 		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad IPv6, addr is LOOPBACK");
939 		return -EINVAL;
940 	}
941 	/* if both src and dest IPv6 addresses are wildcards (any), error */
942 	if (ipv6_addr_any(&match->mask->dst) &&
943 	    ipv6_addr_any(&match->mask->src)) {
944 		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad src/dest IPv6, addr is any");
945 		return -EINVAL;
946 	}
947 	if (!ipv6_addr_any(&match->mask->dst)) {
948 		if (is_encap)
949 			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV6;
950 		else
951 			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV6;
952 	}
953 	if (!ipv6_addr_any(&match->mask->src)) {
954 		if (is_encap)
955 			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV6;
956 		else
957 			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV6;
958 	}
959 
960 	l3_key = &headers->l3_key;
961 	l3_mask = &headers->l3_mask;
962 
963 	if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
964 			   ICE_TC_FLWR_FIELD_SRC_IPV6)) {
965 		memcpy(&l3_key->src_ipv6_addr, &match->key->src.s6_addr,
966 		       sizeof(match->key->src.s6_addr));
967 		memcpy(&l3_mask->src_ipv6_addr, &match->mask->src.s6_addr,
968 		       sizeof(match->mask->src.s6_addr));
969 	}
970 	if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
971 			   ICE_TC_FLWR_FIELD_DEST_IPV6)) {
972 		memcpy(&l3_key->dst_ipv6_addr, &match->key->dst.s6_addr,
973 		       sizeof(match->key->dst.s6_addr));
974 		memcpy(&l3_mask->dst_ipv6_addr, &match->mask->dst.s6_addr,
975 		       sizeof(match->mask->dst.s6_addr));
976 	}
977 
978 	return 0;
979 }
980 
981 /**
982  * ice_tc_set_tos_ttl - Parse IP ToS/TTL from TC flower filter
983  * @match: Pointer to flow match structure
984  * @fltr: Pointer to filter structure
985  * @headers: inner or outer header fields
986  * @is_encap: set true for tunnel
987  */
988 static void
989 ice_tc_set_tos_ttl(struct flow_match_ip *match,
990 		   struct ice_tc_flower_fltr *fltr,
991 		   struct ice_tc_flower_lyr_2_4_hdrs *headers,
992 		   bool is_encap)
993 {
994 	if (match->mask->tos) {
995 		if (is_encap)
996 			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_IP_TOS;
997 		else
998 			fltr->flags |= ICE_TC_FLWR_FIELD_IP_TOS;
999 
1000 		headers->l3_key.tos = match->key->tos;
1001 		headers->l3_mask.tos = match->mask->tos;
1002 	}
1003 
1004 	if (match->mask->ttl) {
1005 		if (is_encap)
1006 			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_IP_TTL;
1007 		else
1008 			fltr->flags |= ICE_TC_FLWR_FIELD_IP_TTL;
1009 
1010 		headers->l3_key.ttl = match->key->ttl;
1011 		headers->l3_mask.ttl = match->mask->ttl;
1012 	}
1013 }
1014 
1015 /**
1016  * ice_tc_set_port - Parse ports from TC flower filter
1017  * @match: Flow match structure
1018  * @fltr: Pointer to filter structure
1019  * @headers: inner or outer header fields
1020  * @is_encap: set true for tunnel port
1021  */
1022 static int
1023 ice_tc_set_port(struct flow_match_ports match,
1024 		struct ice_tc_flower_fltr *fltr,
1025 		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
1026 {
1027 	if (match.key->dst) {
1028 		if (is_encap)
1029 			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT;
1030 		else
1031 			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
1032 
1033 		headers->l4_key.dst_port = match.key->dst;
1034 		headers->l4_mask.dst_port = match.mask->dst;
1035 	}
1036 	if (match.key->src) {
1037 		if (is_encap)
1038 			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT;
1039 		else
1040 			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
1041 
1042 		headers->l4_key.src_port = match.key->src;
1043 		headers->l4_mask.src_port = match.mask->src;
1044 	}
1045 	return 0;
1046 }
1047 
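/**
 * ice_get_tunnel_device - find the tunnel device referenced by the rule
 * @dev: Pointer to the device the filter is being added on
 * @rule: Pointer to the flow rule
 *
 * Return: @dev if it is a supported tunnel device, otherwise the first
 * redirect destination in the rule's actions that is one, or NULL if the
 * rule does not involve a tunnel device
 */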
1048 static struct net_device *
1049 ice_get_tunnel_device(struct net_device *dev, struct flow_rule *rule)
1050 {
1051 	struct flow_action_entry *act;
1052 	int i;
1053 
1054 	if (ice_is_tunnel_supported(dev))
1055 		return dev;
1056 
1057 	flow_action_for_each(i, act, &rule->action) {
1058 		if (act->id == FLOW_ACTION_REDIRECT &&
1059 		    ice_is_tunnel_supported(act->dev))
1060 			return act->dev;
1061 	}
1062 
1063 	return NULL;
1064 }
1065 
1066 /**
1067  * ice_parse_gtp_type - Sets GTP tunnel type to GTP-U or GTP-C
1068  * @match: Flow match structure
1069  * @fltr: Pointer to filter structure
1070  *
1071  * GTP-C/GTP-U is selected based on destination port number (enc_dst_port).
1072  * Before calling this function, fltr->tunnel_type should be set to TNL_GTPU,
1073  * therefore making GTP-U the default choice (when destination port number is
1074  * not specified).
1075  */
1076 static int
1077 ice_parse_gtp_type(struct flow_match_ports match,
1078 		   struct ice_tc_flower_fltr *fltr)
1079 {
1080 	u16 dst_port;
1081 
1082 	if (match.key->dst) {
1083 		dst_port = be16_to_cpu(match.key->dst);
1084 
1085 		switch (dst_port) {
1086 		case 2152:
1087 			break;
1088 		case 2123:
1089 			fltr->tunnel_type = TNL_GTPC;
1090 			break;
1091 		default:
1092 			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported GTP port number");
1093 			return -EINVAL;
1094 		}
1095 	}
1096 
1097 	return 0;
1098 }
1099 
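/**
 * ice_parse_tunnel_attr - parse tunnel (encap) attributes of a TC filter
 * @dev: Pointer to the tunnel device
 * @rule: Pointer to the flow rule
 * @fltr: Pointer to TC flower filter
 *
 * Parse the enc_* keys (key ID, outer IPv4/IPv6 addresses, outer ToS/TTL,
 * tunnel destination port and GTP options) into the filter's outer headers.
 *
 * Return: 0 on success or a negative error code
 */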
1100 static int
1101 ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
1102 		      struct ice_tc_flower_fltr *fltr)
1103 {
1104 	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
1105 	struct flow_match_control enc_control;
1106 
1107 	fltr->tunnel_type = ice_tc_tun_get_type(dev);
1108 	headers->l3_key.ip_proto = IPPROTO_UDP;
1109 
1110 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
1111 		struct flow_match_enc_keyid enc_keyid;
1112 
1113 		flow_rule_match_enc_keyid(rule, &enc_keyid);
1114 
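		/* only an exact match on the tunnel key ID is supported: the
		 * mask must be present and cover all 32 bits
		 */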
1115 		if (!enc_keyid.mask->keyid ||
1116 		    enc_keyid.mask->keyid != cpu_to_be32(ICE_TC_FLOWER_MASK_32))
1117 			return -EINVAL;
1118 
1119 		fltr->flags |= ICE_TC_FLWR_FIELD_TENANT_ID;
1120 		fltr->tenant_id = enc_keyid.key->keyid;
1121 	}
1122 
1123 	flow_rule_match_enc_control(rule, &enc_control);
1124 
1125 	if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1126 		struct flow_match_ipv4_addrs match;
1127 
1128 		flow_rule_match_enc_ipv4_addrs(rule, &match);
1129 		if (ice_tc_set_ipv4(&match, fltr, headers, true))
1130 			return -EINVAL;
1131 	} else if (enc_control.key->addr_type ==
1132 					FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
1133 		struct flow_match_ipv6_addrs match;
1134 
1135 		flow_rule_match_enc_ipv6_addrs(rule, &match);
1136 		if (ice_tc_set_ipv6(&match, fltr, headers, true))
1137 			return -EINVAL;
1138 	}
1139 
1140 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
1141 		struct flow_match_ip match;
1142 
1143 		flow_rule_match_enc_ip(rule, &match);
1144 		ice_tc_set_tos_ttl(&match, fltr, headers, true);
1145 	}
1146 
1147 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) &&
1148 	    fltr->tunnel_type != TNL_VXLAN && fltr->tunnel_type != TNL_GENEVE) {
1149 		struct flow_match_ports match;
1150 
1151 		flow_rule_match_enc_ports(rule, &match);
1152 
1153 		if (fltr->tunnel_type != TNL_GTPU) {
1154 			if (ice_tc_set_port(match, fltr, headers, true))
1155 				return -EINVAL;
1156 		} else {
1157 			if (ice_parse_gtp_type(match, fltr))
1158 				return -EINVAL;
1159 		}
1160 	}
1161 
1162 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
1163 		struct flow_match_enc_opts match;
1164 
1165 		flow_rule_match_enc_opts(rule, &match);
1166 
1167 		memcpy(&fltr->gtp_pdu_info_keys, &match.key->data[0],
1168 		       sizeof(struct gtp_pdu_session_info));
1169 
1170 		memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0],
1171 		       sizeof(struct gtp_pdu_session_info));
1172 
1173 		fltr->flags |= ICE_TC_FLWR_FIELD_ENC_OPTS;
1174 	}
1175 
1176 	return 0;
1177 }
1178 
1179 /**
1180  * ice_parse_cls_flower - Parse TC flower filters provided by kernel
1181  * @filter_dev: Pointer to device on which filter is being added
1182  * @vsi: Pointer to the VSI
1183  * @f: Pointer to struct flow_cls_offload
1184  * @fltr: Pointer to filter structure
1185  */
1186 static int
1187 ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
1188 		     struct flow_cls_offload *f,
1189 		     struct ice_tc_flower_fltr *fltr)
1190 {
1191 	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
1192 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
1193 	u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
1194 	struct flow_dissector *dissector;
1195 	struct net_device *tunnel_dev;
1196 
1197 	dissector = rule->match.dissector;
1198 
1199 	if (dissector->used_keys &
1200 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
1201 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
1202 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
1203 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
1204 	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
1205 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
1206 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
1207 	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
1208 	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
1209 	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
1210 	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
1211 	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
1212 	      BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
1213 	      BIT(FLOW_DISSECTOR_KEY_IP) |
1214 	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
1215 	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
1216 	      BIT(FLOW_DISSECTOR_KEY_PPPOE) |
1217 	      BIT(FLOW_DISSECTOR_KEY_L2TPV3))) {
1218 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
1219 		return -EOPNOTSUPP;
1220 	}
1221 
1222 	tunnel_dev = ice_get_tunnel_device(filter_dev, rule);
1223 	if (tunnel_dev) {
1224 		int err;
1225 
1226 		filter_dev = tunnel_dev;
1227 
1228 		err = ice_parse_tunnel_attr(filter_dev, rule, fltr);
1229 		if (err) {
1230 			NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to parse TC flower tunnel attributes");
1231 			return err;
1232 		}
1233 
1234 		/* header pointers should point to the inner headers; outer
1235 		 * headers were already set by ice_parse_tunnel_attr
1236 		 */
1237 		headers = &fltr->inner_headers;
1238 	} else if (dissector->used_keys &
1239 		  (BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
1240 		   BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
1241 		   BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
1242 		   BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
1243 		NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
1244 		return -EOPNOTSUPP;
1245 	} else {
1246 		fltr->tunnel_type = TNL_LAST;
1247 	}
1248 
1249 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
1250 		struct flow_match_basic match;
1251 
1252 		flow_rule_match_basic(rule, &match);
1253 
1254 		n_proto_key = ntohs(match.key->n_proto);
1255 		n_proto_mask = ntohs(match.mask->n_proto);
1256 
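		/* skip the ethertype lookup when the protocol is wildcarded
		 * (ETH_P_ALL or 0) or the filter targets a GTP tunnel
		 */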
1257 		if (n_proto_key == ETH_P_ALL || n_proto_key == 0 ||
1258 		    fltr->tunnel_type == TNL_GTPU ||
1259 		    fltr->tunnel_type == TNL_GTPC) {
1260 			n_proto_key = 0;
1261 			n_proto_mask = 0;
1262 		} else {
1263 			fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
1264 		}
1265 
1266 		headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
1267 		headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask);
1268 		headers->l3_key.ip_proto = match.key->ip_proto;
1269 	}
1270 
1271 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
1272 		struct flow_match_eth_addrs match;
1273 
1274 		flow_rule_match_eth_addrs(rule, &match);
1275 
1276 		if (!is_zero_ether_addr(match.key->dst)) {
1277 			ether_addr_copy(headers->l2_key.dst_mac,
1278 					match.key->dst);
1279 			ether_addr_copy(headers->l2_mask.dst_mac,
1280 					match.mask->dst);
1281 			fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
1282 		}
1283 
1284 		if (!is_zero_ether_addr(match.key->src)) {
1285 			ether_addr_copy(headers->l2_key.src_mac,
1286 					match.key->src);
1287 			ether_addr_copy(headers->l2_mask.src_mac,
1288 					match.mask->src);
1289 			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_MAC;
1290 		}
1291 	}
1292 
1293 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
1294 	    is_vlan_dev(filter_dev)) {
1295 		struct flow_dissector_key_vlan mask;
1296 		struct flow_dissector_key_vlan key;
1297 		struct flow_match_vlan match;
1298 
1299 		if (is_vlan_dev(filter_dev)) {
1300 			match.key = &key;
1301 			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
1302 			match.key->vlan_priority = 0;
1303 			match.mask = &mask;
1304 			memset(match.mask, 0xff, sizeof(*match.mask));
1305 			match.mask->vlan_priority = 0;
1306 		} else {
1307 			flow_rule_match_vlan(rule, &match);
1308 		}
1309 
1310 		if (match.mask->vlan_id) {
1311 			if (match.mask->vlan_id == VLAN_VID_MASK) {
1312 				fltr->flags |= ICE_TC_FLWR_FIELD_VLAN;
1313 				headers->vlan_hdr.vlan_id =
1314 					cpu_to_be16(match.key->vlan_id &
1315 						    VLAN_VID_MASK);
1316 			} else {
1317 				NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask");
1318 				return -EINVAL;
1319 			}
1320 		}
1321 
1322 		if (match.mask->vlan_priority) {
1323 			fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_PRIO;
1324 			headers->vlan_hdr.vlan_prio =
1325 				cpu_to_be16((match.key->vlan_priority <<
1326 					     VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
1327 		}
1328 
1329 		if (match.mask->vlan_tpid)
1330 			headers->vlan_hdr.vlan_tpid = match.key->vlan_tpid;
1331 	}
1332 
1333 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
1334 		struct flow_match_vlan match;
1335 
1336 		if (!ice_is_dvm_ena(&vsi->back->hw)) {
1337 			NL_SET_ERR_MSG_MOD(fltr->extack, "Double VLAN mode is not enabled");
1338 			return -EINVAL;
1339 		}
1340 
1341 		flow_rule_match_cvlan(rule, &match);
1342 
1343 		if (match.mask->vlan_id) {
1344 			if (match.mask->vlan_id == VLAN_VID_MASK) {
1345 				fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN;
1346 				headers->cvlan_hdr.vlan_id =
1347 					cpu_to_be16(match.key->vlan_id &
1348 						    VLAN_VID_MASK);
1349 			} else {
1350 				NL_SET_ERR_MSG_MOD(fltr->extack,
1351 						   "Bad CVLAN mask");
1352 				return -EINVAL;
1353 			}
1354 		}
1355 
1356 		if (match.mask->vlan_priority) {
1357 			fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN_PRIO;
1358 			headers->cvlan_hdr.vlan_prio =
1359 				cpu_to_be16((match.key->vlan_priority <<
1360 					     VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
1361 		}
1362 	}
1363 
1364 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PPPOE)) {
1365 		struct flow_match_pppoe match;
1366 
1367 		flow_rule_match_pppoe(rule, &match);
1368 		n_proto_key = ice_tc_set_pppoe(&match, fltr, headers);
1369 
1370 		/* If ethertype equals ETH_P_PPP_SES, n_proto might be
1371 		 * overwritten by encapsulated protocol (ppp_proto field) or set
1372 		 * to 0. To correct this, flow_match_pppoe provides the type
1373 		 * field, which contains the actual ethertype (ETH_P_PPP_SES).
1374 		 */
1375 		headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
1376 		headers->l2_mask.n_proto = cpu_to_be16(0xFFFF);
1377 		fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
1378 	}
1379 
1380 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
1381 		struct flow_match_control match;
1382 
1383 		flow_rule_match_control(rule, &match);
1384 
1385 		addr_type = match.key->addr_type;
1386 	}
1387 
1388 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1389 		struct flow_match_ipv4_addrs match;
1390 
1391 		flow_rule_match_ipv4_addrs(rule, &match);
1392 		if (ice_tc_set_ipv4(&match, fltr, headers, false))
1393 			return -EINVAL;
1394 	}
1395 
1396 	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
1397 		struct flow_match_ipv6_addrs match;
1398 
1399 		flow_rule_match_ipv6_addrs(rule, &match);
1400 		if (ice_tc_set_ipv6(&match, fltr, headers, false))
1401 			return -EINVAL;
1402 	}
1403 
1404 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
1405 		struct flow_match_ip match;
1406 
1407 		flow_rule_match_ip(rule, &match);
1408 		ice_tc_set_tos_ttl(&match, fltr, headers, false);
1409 	}
1410 
1411 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_L2TPV3)) {
1412 		struct flow_match_l2tpv3 match;
1413 
1414 		flow_rule_match_l2tpv3(rule, &match);
1415 
1416 		fltr->flags |= ICE_TC_FLWR_FIELD_L2TPV3_SESSID;
1417 		headers->l2tpv3_hdr.session_id = match.key->session_id;
1418 	}
1419 
1420 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
1421 		struct flow_match_ports match;
1422 
1423 		flow_rule_match_ports(rule, &match);
1424 		if (ice_tc_set_port(match, fltr, headers, false))
1425 			return -EINVAL;
1426 		switch (headers->l3_key.ip_proto) {
1427 		case IPPROTO_TCP:
1428 		case IPPROTO_UDP:
1429 			break;
1430 		default:
1431 			NL_SET_ERR_MSG_MOD(fltr->extack, "Only UDP and TCP transport are supported");
1432 			return -EINVAL;
1433 		}
1434 	}
1435 	return 0;
1436 }
1437 
1438 /**
1439  * ice_add_switch_fltr - Add TC flower filters
1440  * @vsi: Pointer to VSI
1441  * @fltr: Pointer to struct ice_tc_flower_fltr
1442  *
1443  * Add filter in HW switch block
1444  */
1445 static int
1446 ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
1447 {
1448 	if (fltr->action.fltr_act == ICE_FWD_TO_QGRP)
1449 		return -EOPNOTSUPP;
1450 
1451 	if (ice_is_eswitch_mode_switchdev(vsi->back))
1452 		return ice_eswitch_add_tc_fltr(vsi, fltr);
1453 
1454 	return ice_add_tc_flower_adv_fltr(vsi, fltr);
1455 }
1456 
1457 /**
1458  * ice_handle_tclass_action - Support directing to a traffic class
1459  * @vsi: Pointer to VSI
1460  * @cls_flower: Pointer to TC flower offload structure
1461  * @fltr: Pointer to TC flower filter structure
1462  *
1463  * Support directing traffic to a traffic class
1464  */
1465 static int
1466 ice_handle_tclass_action(struct ice_vsi *vsi,
1467 			 struct flow_cls_offload *cls_flower,
1468 			 struct ice_tc_flower_fltr *fltr)
1469 {
1470 	int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
1471 	struct ice_vsi *main_vsi;
1472 
1473 	if (tc < 0) {
1474 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because specified destination is invalid");
1475 		return -EINVAL;
1476 	}
1477 	if (!tc) {
1478 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of invalid destination");
1479 		return -EINVAL;
1480 	}
1481 
1482 	if (!(vsi->all_enatc & BIT(tc))) {
1483 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of non-existent destination");
1484 		return -EINVAL;
1485 	}
1486 
1487 	/* Redirect to a TC class or Queue Group */
1488 	main_vsi = ice_get_main_vsi(vsi->back);
1489 	if (!main_vsi || !main_vsi->netdev) {
1490 		NL_SET_ERR_MSG_MOD(fltr->extack,
1491 				   "Unable to add filter because of invalid netdevice");
1492 		return -EINVAL;
1493 	}
1494 
1495 	if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) &&
1496 	    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
1497 			   ICE_TC_FLWR_FIELD_SRC_MAC))) {
1498 		NL_SET_ERR_MSG_MOD(fltr->extack,
1499 				   "Unable to add filter because filter using tunnel key and inner MAC is unsupported combination");
1500 		return -EOPNOTSUPP;
1501 	}
1502 
1503 	/* For ADQ, the filter must include a dest MAC address, otherwise
1504 	 * unwanted packets with unrelated MAC addresses get delivered to ADQ
1505 	 * VSIs as long as the remaining filter criteria are satisfied, such as
1506 	 * dest IP address and dest/src L4 port. The following code handles:
1507 	 * 1. For non-tunnel, if the user specified MAC addresses, use them
1508 	 * (meaning this code won't do anything).
1509 	 * 2. For non-tunnel, if the user didn't specify a MAC address, add an
1510 	 * implicit dest MAC equal to the lower netdev's active unicast MAC.
1511 	 * 3. For tunnel, as of now the TC filter through the flower classifier
1512 	 * has no provision for the user to specify an outer DMAC, hence the
1513 	 * driver implicitly adds the outer dest MAC equal to the lower
1514 	 * netdev's active unicast MAC address.
1515 	 */
1516 	if (fltr->tunnel_type != TNL_LAST &&
1517 	    !(fltr->flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC))
1518 		fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DST_MAC;
1519 
1520 	if (fltr->tunnel_type == TNL_LAST &&
1521 	    !(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC))
1522 		fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
1523 
1524 	if (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
1525 			   ICE_TC_FLWR_FIELD_ENC_DST_MAC)) {
1526 		ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
1527 				vsi->netdev->dev_addr);
1528 		eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
1529 	}
1530 
1531 	/* validate the specified dest MAC address; make sure it either belongs
1532 	 * to the lower netdev or to one of its MACVLANs. MACVLAN MAC addresses
1533 	 * are added as unicast MAC filters destined to the main VSI.
1534 	 */
1535 	if (!ice_mac_fltr_exist(&main_vsi->back->hw,
1536 				fltr->outer_headers.l2_key.dst_mac,
1537 				main_vsi->idx)) {
1538 		NL_SET_ERR_MSG_MOD(fltr->extack,
1539 				   "Unable to add filter because legacy MAC filter for specified destination doesn't exist");
1540 		return -EINVAL;
1541 	}
1542 
1543 	/* Make sure VLAN is already added to main VSI, before allowing ADQ to
1544 	 * add a VLAN based filter such as MAC + VLAN + L4 port.
1545 	 */
1546 	if (fltr->flags & ICE_TC_FLWR_FIELD_VLAN) {
1547 		u16 vlan_id = be16_to_cpu(fltr->outer_headers.vlan_hdr.vlan_id);
1548 
1549 		if (!ice_vlan_fltr_exist(&main_vsi->back->hw, vlan_id,
1550 					 main_vsi->idx)) {
1551 			NL_SET_ERR_MSG_MOD(fltr->extack,
1552 					   "Unable to add filter because legacy VLAN filter for specified destination doesn't exist");
1553 			return -EINVAL;
1554 		}
1555 	}
1556 	fltr->action.fltr_act = ICE_FWD_TO_VSI;
1557 	fltr->action.tc_class = tc;
1558 
1559 	return 0;
1560 }
1561 
1562 /**
1563  * ice_parse_tc_flower_actions - Parse the actions for a TC filter
1564  * @vsi: Pointer to VSI
1565  * @cls_flower: Pointer to TC flower offload structure
1566  * @fltr: Pointer to TC flower filter structure
1567  *
1568  * Parse the actions for a TC filter
1569  */
1570 static int
1571 ice_parse_tc_flower_actions(struct ice_vsi *vsi,
1572 			    struct flow_cls_offload *cls_flower,
1573 			    struct ice_tc_flower_fltr *fltr)
1574 {
1575 	struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
1576 	struct flow_action *flow_action = &rule->action;
1577 	struct flow_action_entry *act;
1578 	int i;
1579 
1580 	if (cls_flower->classid)
1581 		return ice_handle_tclass_action(vsi, cls_flower, fltr);
1582 
1583 	if (!flow_action_has_entries(flow_action))
1584 		return -EINVAL;
1585 
1586 	flow_action_for_each(i, act, flow_action) {
1587 		if (ice_is_eswitch_mode_switchdev(vsi->back)) {
1588 			int err = ice_eswitch_tc_parse_action(fltr, act);
1589 
1590 			if (err)
1591 				return err;
1592 			continue;
1593 		}
1594 		/* Allow only one rule per filter */
1595 
1596 		/* Drop action */
1597 		if (act->id == FLOW_ACTION_DROP) {
1598 			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action DROP");
1599 			return -EINVAL;
1600 		}
1601 		fltr->action.fltr_act = ICE_FWD_TO_VSI;
1602 	}
1603 	return 0;
1604 }
1605 
1606 /**
1607  * ice_del_tc_fltr - deletes a filter from HW table
1608  * @vsi: Pointer to VSI
1609  * @fltr: Pointer to struct ice_tc_flower_fltr
1610  *
1611  * This function deletes a filter from HW table and manages book-keeping
1612  */
1613 static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
1614 {
1615 	struct ice_rule_query_data rule_rem;
1616 	struct ice_pf *pf = vsi->back;
1617 	int err;
1618 
1619 	rule_rem.rid = fltr->rid;
1620 	rule_rem.rule_id = fltr->rule_id;
1621 	rule_rem.vsi_handle = fltr->dest_id;
1622 	err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
1623 	if (err) {
1624 		if (err == -ENOENT) {
1625 			NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist");
1626 			return -ENOENT;
1627 		}
1628 		NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to delete TC flower filter");
1629 		return -EIO;
1630 	}
1631 
1632 	/* update advanced switch filter count for destination
1633 	 * VSI if filter destination was VSI
1634 	 */
1635 	if (fltr->dest_vsi) {
1636 		if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
1637 			fltr->dest_vsi->num_chnl_fltr--;
1638 
1639 			/* keeps track of channel filters for PF VSI */
1640 			if (vsi->type == ICE_VSI_PF &&
1641 			    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
1642 					    ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
1643 				pf->num_dmac_chnl_fltrs--;
1644 		}
1645 	}
1646 	return 0;
1647 }
1648 
1649 /**
1650  * ice_add_tc_fltr - adds a TC flower filter
1651  * @netdev: Pointer to netdev
1652  * @vsi: Pointer to VSI
1653  * @f: Pointer to flower offload structure
1654  * @__fltr: Pointer used to return the newly created struct ice_tc_flower_fltr
1655  *
1656  * This function parses TC-flower input fields, parses action,
1657  * and adds a filter.
1658  */
1659 static int
1660 ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
1661 		struct flow_cls_offload *f,
1662 		struct ice_tc_flower_fltr **__fltr)
1663 {
1664 	struct ice_tc_flower_fltr *fltr;
1665 	int err;
1666 
1667 	/* by default, set output to be INVALID */
1668 	*__fltr = NULL;
1669 
1670 	fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
1671 	if (!fltr)
1672 		return -ENOMEM;
1673 
1674 	fltr->cookie = f->cookie;
1675 	fltr->extack = f->common.extack;
1676 	fltr->src_vsi = vsi;
1677 	INIT_HLIST_NODE(&fltr->tc_flower_node);
1678 
1679 	err = ice_parse_cls_flower(netdev, vsi, f, fltr);
1680 	if (err < 0)
1681 		goto err;
1682 
1683 	err = ice_parse_tc_flower_actions(vsi, f, fltr);
1684 	if (err < 0)
1685 		goto err;
1686 
1687 	err = ice_add_switch_fltr(vsi, fltr);
1688 	if (err < 0)
1689 		goto err;
1690 
1691 	/* return the newly created filter */
1692 	*__fltr = fltr;
1693 
1694 	return 0;
1695 err:
1696 	kfree(fltr);
1697 	return err;
1698 }
1699 
1700 /**
1701  * ice_find_tc_flower_fltr - Find the TC flower filter in the list
1702  * @pf: Pointer to PF
1703  * @cookie: filter specific cookie
1704  */
1705 static struct ice_tc_flower_fltr *
1706 ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie)
1707 {
1708 	struct ice_tc_flower_fltr *fltr;
1709 
1710 	hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node)
1711 		if (cookie == fltr->cookie)
1712 			return fltr;
1713 
1714 	return NULL;
1715 }
1716 
1717 /**
1718  * ice_add_cls_flower - add TC flower filters
1719  * @netdev: Pointer to filter device
1720  * @vsi: Pointer to VSI
1721  * @cls_flower: Pointer to flower offload structure
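 *
 * A minimal usage sketch (the interface name, IP address and port below are
 * only examples; assumes hw-tc-offload is enabled and, for hw_tc, that ADQ
 * traffic classes are already configured):
 *   tc filter add dev eth0 protocol ip ingress prio 1 flower \
 *      dst_ip 192.168.1.10 ip_proto tcp dst_port 80 skip_sw hw_tc 1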
1722  */
1723 int
1724 ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
1725 		   struct flow_cls_offload *cls_flower)
1726 {
1727 	struct netlink_ext_ack *extack = cls_flower->common.extack;
1728 	struct net_device *vsi_netdev = vsi->netdev;
1729 	struct ice_tc_flower_fltr *fltr;
1730 	struct ice_pf *pf = vsi->back;
1731 	int err;
1732 
1733 	if (ice_is_reset_in_progress(pf->state))
1734 		return -EBUSY;
1735 	if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
1736 		return -EINVAL;
1737 
1738 	if (ice_is_port_repr_netdev(netdev))
1739 		vsi_netdev = netdev;
1740 
1741 	if (!(vsi_netdev->features & NETIF_F_HW_TC) &&
1742 	    !test_bit(ICE_FLAG_CLS_FLOWER, pf->flags)) {
1743 		/* Based on TC indirect notifications from the kernel, all ice
1744 		 * devices get an instance of the rule from the higher-level
1745 		 * device. Avoid triggering an explicit error in this case.
1746 		 */
1747 		if (netdev == vsi_netdev)
1748 			NL_SET_ERR_MSG_MOD(extack, "can't apply TC flower filters, turn ON hw-tc-offload and try again");
1749 		return -EINVAL;
1750 	}
1751 
1752 	/* avoid duplicate entries, if exists - return error */
1753 	fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
1754 	if (fltr) {
1755 		NL_SET_ERR_MSG_MOD(extack, "filter cookie already exists, ignoring");
1756 		return -EEXIST;
1757 	}
1758 
1759 	/* prep and add TC-flower filter in HW */
1760 	err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr);
1761 	if (err)
1762 		return err;
1763 
1764 	/* add filter into an ordered list */
1765 	hlist_add_head(&fltr->tc_flower_node, &pf->tc_flower_fltr_list);
1766 	return 0;
1767 }
1768 
1769 /**
1770  * ice_del_cls_flower - delete TC flower filters
1771  * @vsi: Pointer to VSI
1772  * @cls_flower: Pointer to struct flow_cls_offload
1773  */
1774 int
1775 ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower)
1776 {
1777 	struct ice_tc_flower_fltr *fltr;
1778 	struct ice_pf *pf = vsi->back;
1779 	int err;
1780 
1781 	/* find filter */
1782 	fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
1783 	if (!fltr) {
1784 		if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) &&
1785 		    hlist_empty(&pf->tc_flower_fltr_list))
1786 			return 0;
1787 
1788 		NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "failed to delete TC flower filter because unable to find it");
1789 		return -EINVAL;
1790 	}
1791 
1792 	fltr->extack = cls_flower->common.extack;
1793 	/* delete filter from HW */
1794 	err = ice_del_tc_fltr(vsi, fltr);
1795 	if (err)
1796 		return err;
1797 
1798 	/* delete filter from an ordered list */
1799 	hlist_del(&fltr->tc_flower_node);
1800 
1801 	/* free the filter node */
1802 	kfree(fltr);
1803 
1804 	return 0;
1805 }
1806 
1807 /**
1808  * ice_replay_tc_fltrs - replay TC filters
1809  * @pf: pointer to PF struct
1810  */
1811 void ice_replay_tc_fltrs(struct ice_pf *pf)
1812 {
1813 	struct ice_tc_flower_fltr *fltr;
1814 	struct hlist_node *node;
1815 
1816 	hlist_for_each_entry_safe(fltr, node,
1817 				  &pf->tc_flower_fltr_list,
1818 				  tc_flower_node) {
1819 		fltr->extack = NULL;
1820 		ice_add_switch_fltr(fltr->src_vsi, fltr);
1821 	}
1822 }
1823