1 // SPDX-License-Identifier: GPL-2.0+
2 /* Microchip VCAP API
3  *
4  * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
5  */
6 
7 #include <net/tcp.h>
8 
9 #include "sparx5_tc.h"
10 #include "vcap_api.h"
11 #include "vcap_api_client.h"
12 #include "sparx5_main.h"
13 #include "sparx5_vcap_impl.h"
14 
#define SPX5_MAX_RULE_SIZE 13 /* allows X1, X2, X4, X6 and X12 rules */

/* Collect keysets and type ids for multiple rules per size */
struct sparx5_wildcard_rule {
	bool selected;			/* a keyset was chosen for this rule size */
	u8 value;			/* combined VCAP_KF_TYPE id value */
	u8 mask;			/* combined VCAP_KF_TYPE id mask */
	enum vcap_keyfield_set keyset;	/* keyset to use for this rule copy */
};

/* One slot per rule size (indexed by sw_per_item of the keyset) */
struct sparx5_multiple_rules {
	struct sparx5_wildcard_rule rule[SPX5_MAX_RULE_SIZE];
};
28 
/* State shared by the flow dissector key handlers while a filter is parsed */
struct sparx5_tc_flower_parse_usage {
	struct flow_cls_offload *fco;	/* offload request (cookie, extack, ...) */
	struct flow_rule *frule;	/* TC flow rule being parsed */
	struct vcap_rule *vrule;	/* VCAP rule being built */
	u16 l3_proto;			/* ethertype from the basic key (host order) */
	u8 l4_proto;			/* ip_proto from the basic key */
	unsigned int used_keys;		/* dissector key bits consumed so far */
};

/* Context used when summing packet counters over rules with one cookie */
struct sparx5_tc_rule_pkt_cnt {
	u64 cookie;	/* TC filter cookie to match rules against */
	u32 pkts;	/* accumulated packet count */
};
42 
/* These protocols have dedicated keysets in IS2 and a TC dissector
 * ETH_P_ARP does not have a TC dissector
 */
static u16 sparx5_tc_known_etypes[] = {
	ETH_P_ALL,
	ETH_P_ARP,
	ETH_P_IP,
	ETH_P_IPV6,
};

/* ARP opcode encoding used by the IS2 ARP keyset */
enum sparx5_is2_arp_opcode {
	SPX5_IS2_ARP_REQUEST,
	SPX5_IS2_ARP_REPLY,
	SPX5_IS2_RARP_REQUEST,
	SPX5_IS2_RARP_REPLY,
};

/* ARP opcode values as delivered by the TC flower arp_op key */
enum tc_arp_opcode {
	TC_ARP_OP_RESERVED,
	TC_ARP_OP_REQUEST,
	TC_ARP_OP_REPLY,
};
65 
66 static bool sparx5_tc_is_known_etype(u16 etype)
67 {
68 	int idx;
69 
70 	/* For now this only knows about IS2 traffic classification */
71 	for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_known_etypes); ++idx)
72 		if (sparx5_tc_known_etypes[idx] == etype)
73 			return true;
74 
75 	return false;
76 }
77 
78 static int sparx5_tc_flower_handler_ethaddr_usage(struct sparx5_tc_flower_parse_usage *st)
79 {
80 	enum vcap_key_field smac_key = VCAP_KF_L2_SMAC;
81 	enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC;
82 	struct flow_match_eth_addrs match;
83 	struct vcap_u48_key smac, dmac;
84 	int err = 0;
85 
86 	flow_rule_match_eth_addrs(st->frule, &match);
87 
88 	if (!is_zero_ether_addr(match.mask->src)) {
89 		vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN);
90 		vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN);
91 		err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac);
92 		if (err)
93 			goto out;
94 	}
95 
96 	if (!is_zero_ether_addr(match.mask->dst)) {
97 		vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN);
98 		vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN);
99 		err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac);
100 		if (err)
101 			goto out;
102 	}
103 
104 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS);
105 
106 	return err;
107 
108 out:
109 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "eth_addr parse error");
110 	return err;
111 }
112 
113 static int
114 sparx5_tc_flower_handler_ipv4_usage(struct sparx5_tc_flower_parse_usage *st)
115 {
116 	int err = 0;
117 
118 	if (st->l3_proto == ETH_P_IP) {
119 		struct flow_match_ipv4_addrs mt;
120 
121 		flow_rule_match_ipv4_addrs(st->frule, &mt);
122 		if (mt.mask->src) {
123 			err = vcap_rule_add_key_u32(st->vrule,
124 						    VCAP_KF_L3_IP4_SIP,
125 						    be32_to_cpu(mt.key->src),
126 						    be32_to_cpu(mt.mask->src));
127 			if (err)
128 				goto out;
129 		}
130 		if (mt.mask->dst) {
131 			err = vcap_rule_add_key_u32(st->vrule,
132 						    VCAP_KF_L3_IP4_DIP,
133 						    be32_to_cpu(mt.key->dst),
134 						    be32_to_cpu(mt.mask->dst));
135 			if (err)
136 				goto out;
137 		}
138 	}
139 
140 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS);
141 
142 	return err;
143 
144 out:
145 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv4_addr parse error");
146 	return err;
147 }
148 
149 static int
150 sparx5_tc_flower_handler_ipv6_usage(struct sparx5_tc_flower_parse_usage *st)
151 {
152 	int err = 0;
153 
154 	if (st->l3_proto == ETH_P_IPV6) {
155 		struct flow_match_ipv6_addrs mt;
156 		struct vcap_u128_key sip;
157 		struct vcap_u128_key dip;
158 
159 		flow_rule_match_ipv6_addrs(st->frule, &mt);
160 		/* Check if address masks are non-zero */
161 		if (!ipv6_addr_any(&mt.mask->src)) {
162 			vcap_netbytes_copy(sip.value, mt.key->src.s6_addr, 16);
163 			vcap_netbytes_copy(sip.mask, mt.mask->src.s6_addr, 16);
164 			err = vcap_rule_add_key_u128(st->vrule,
165 						     VCAP_KF_L3_IP6_SIP, &sip);
166 			if (err)
167 				goto out;
168 		}
169 		if (!ipv6_addr_any(&mt.mask->dst)) {
170 			vcap_netbytes_copy(dip.value, mt.key->dst.s6_addr, 16);
171 			vcap_netbytes_copy(dip.mask, mt.mask->dst.s6_addr, 16);
172 			err = vcap_rule_add_key_u128(st->vrule,
173 						     VCAP_KF_L3_IP6_DIP, &dip);
174 			if (err)
175 				goto out;
176 		}
177 	}
178 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
179 	return err;
180 out:
181 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv6_addr parse error");
182 	return err;
183 }
184 
185 static int
186 sparx5_tc_flower_handler_control_usage(struct sparx5_tc_flower_parse_usage *st)
187 {
188 	struct flow_match_control mt;
189 	u32 value, mask;
190 	int err = 0;
191 
192 	flow_rule_match_control(st->frule, &mt);
193 
194 	if (mt.mask->flags) {
195 		if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
196 			if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
197 				value = 1; /* initial fragment */
198 				mask = 0x3;
199 			} else {
200 				if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
201 					value = 3; /* follow up fragment */
202 					mask = 0x3;
203 				} else {
204 					value = 0; /* no fragment */
205 					mask = 0x3;
206 				}
207 			}
208 		} else {
209 			if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
210 				value = 3; /* follow up fragment */
211 				mask = 0x3;
212 			} else {
213 				value = 0; /* no fragment */
214 				mask = 0x3;
215 			}
216 		}
217 
218 		err = vcap_rule_add_key_u32(st->vrule,
219 					    VCAP_KF_L3_FRAGMENT_TYPE,
220 					    value, mask);
221 		if (err)
222 			goto out;
223 	}
224 
225 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
226 
227 	return err;
228 
229 out:
230 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
231 	return err;
232 }
233 
234 static int
235 sparx5_tc_flower_handler_portnum_usage(struct sparx5_tc_flower_parse_usage *st)
236 {
237 	struct flow_match_ports mt;
238 	u16 value, mask;
239 	int err = 0;
240 
241 	flow_rule_match_ports(st->frule, &mt);
242 
243 	if (mt.mask->src) {
244 		value = be16_to_cpu(mt.key->src);
245 		mask = be16_to_cpu(mt.mask->src);
246 		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_SPORT, value,
247 					    mask);
248 		if (err)
249 			goto out;
250 	}
251 
252 	if (mt.mask->dst) {
253 		value = be16_to_cpu(mt.key->dst);
254 		mask = be16_to_cpu(mt.mask->dst);
255 		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_DPORT, value,
256 					    mask);
257 		if (err)
258 			goto out;
259 	}
260 
261 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_PORTS);
262 
263 	return err;
264 
265 out:
266 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "port parse error");
267 	return err;
268 }
269 
270 static int
271 sparx5_tc_flower_handler_basic_usage(struct sparx5_tc_flower_parse_usage *st)
272 {
273 	struct flow_match_basic mt;
274 	int err = 0;
275 
276 	flow_rule_match_basic(st->frule, &mt);
277 
278 	if (mt.mask->n_proto) {
279 		st->l3_proto = be16_to_cpu(mt.key->n_proto);
280 		if (!sparx5_tc_is_known_etype(st->l3_proto)) {
281 			err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
282 						    st->l3_proto, ~0);
283 			if (err)
284 				goto out;
285 		} else if (st->l3_proto == ETH_P_IP) {
286 			err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
287 						    VCAP_BIT_1);
288 			if (err)
289 				goto out;
290 		} else if (st->l3_proto == ETH_P_IPV6) {
291 			err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
292 						    VCAP_BIT_0);
293 			if (err)
294 				goto out;
295 		}
296 	}
297 
298 	if (mt.mask->ip_proto) {
299 		st->l4_proto = mt.key->ip_proto;
300 		if (st->l4_proto == IPPROTO_TCP) {
301 			err = vcap_rule_add_key_bit(st->vrule,
302 						    VCAP_KF_TCP_IS,
303 						    VCAP_BIT_1);
304 			if (err)
305 				goto out;
306 		} else if (st->l4_proto == IPPROTO_UDP) {
307 			err = vcap_rule_add_key_bit(st->vrule,
308 						    VCAP_KF_TCP_IS,
309 						    VCAP_BIT_0);
310 			if (err)
311 				goto out;
312 		} else {
313 			err = vcap_rule_add_key_u32(st->vrule,
314 						    VCAP_KF_L3_IP_PROTO,
315 						    st->l4_proto, ~0);
316 			if (err)
317 				goto out;
318 		}
319 	}
320 
321 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC);
322 
323 	return err;
324 
325 out:
326 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_proto parse error");
327 	return err;
328 }
329 
330 static int
331 sparx5_tc_flower_handler_vlan_usage(struct sparx5_tc_flower_parse_usage *st)
332 {
333 	enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS;
334 	enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS;
335 	struct flow_match_vlan mt;
336 	int err;
337 
338 	flow_rule_match_vlan(st->frule, &mt);
339 
340 	if (mt.mask->vlan_id) {
341 		err = vcap_rule_add_key_u32(st->vrule, vid_key,
342 					    mt.key->vlan_id,
343 					    mt.mask->vlan_id);
344 		if (err)
345 			goto out;
346 	}
347 
348 	if (mt.mask->vlan_priority) {
349 		err = vcap_rule_add_key_u32(st->vrule, pcp_key,
350 					    mt.key->vlan_priority,
351 					    mt.mask->vlan_priority);
352 		if (err)
353 			goto out;
354 	}
355 
356 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
357 
358 	return 0;
359 out:
360 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "vlan parse error");
361 	return err;
362 }
363 
364 static int
365 sparx5_tc_flower_handler_tcp_usage(struct sparx5_tc_flower_parse_usage *st)
366 {
367 	struct flow_match_tcp mt;
368 	u16 tcp_flags_mask;
369 	u16 tcp_flags_key;
370 	enum vcap_bit val;
371 	int err = 0;
372 
373 	flow_rule_match_tcp(st->frule, &mt);
374 	tcp_flags_key = be16_to_cpu(mt.key->flags);
375 	tcp_flags_mask = be16_to_cpu(mt.mask->flags);
376 
377 	if (tcp_flags_mask & TCPHDR_FIN) {
378 		val = VCAP_BIT_0;
379 		if (tcp_flags_key & TCPHDR_FIN)
380 			val = VCAP_BIT_1;
381 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_FIN, val);
382 		if (err)
383 			goto out;
384 	}
385 
386 	if (tcp_flags_mask & TCPHDR_SYN) {
387 		val = VCAP_BIT_0;
388 		if (tcp_flags_key & TCPHDR_SYN)
389 			val = VCAP_BIT_1;
390 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_SYN, val);
391 		if (err)
392 			goto out;
393 	}
394 
395 	if (tcp_flags_mask & TCPHDR_RST) {
396 		val = VCAP_BIT_0;
397 		if (tcp_flags_key & TCPHDR_RST)
398 			val = VCAP_BIT_1;
399 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_RST, val);
400 		if (err)
401 			goto out;
402 	}
403 
404 	if (tcp_flags_mask & TCPHDR_PSH) {
405 		val = VCAP_BIT_0;
406 		if (tcp_flags_key & TCPHDR_PSH)
407 			val = VCAP_BIT_1;
408 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_PSH, val);
409 		if (err)
410 			goto out;
411 	}
412 
413 	if (tcp_flags_mask & TCPHDR_ACK) {
414 		val = VCAP_BIT_0;
415 		if (tcp_flags_key & TCPHDR_ACK)
416 			val = VCAP_BIT_1;
417 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_ACK, val);
418 		if (err)
419 			goto out;
420 	}
421 
422 	if (tcp_flags_mask & TCPHDR_URG) {
423 		val = VCAP_BIT_0;
424 		if (tcp_flags_key & TCPHDR_URG)
425 			val = VCAP_BIT_1;
426 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_URG, val);
427 		if (err)
428 			goto out;
429 	}
430 
431 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
432 
433 	return err;
434 
435 out:
436 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "tcp_flags parse error");
437 	return err;
438 }
439 
/* Parse the ARP dissector keys into VCAP key fields
 *
 * Only the opcode and the sender/target IPv4 addresses can be matched;
 * hardware address matching is rejected.
 */
static int
sparx5_tc_flower_handler_arp_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_arp mt;
	u16 value, mask;
	u32 ipval, ipmsk;
	int err;

	flow_rule_match_arp(st->frule, &mt);

	if (mt.mask->op) {
		mask = 0x3;
		/* Translate the TC opcode into the IS2 ARP/RARP encoding;
		 * any opcode other than "request" is treated as a reply
		 */
		if (st->l3_proto == ETH_P_ARP) {
			value = mt.key->op == TC_ARP_OP_REQUEST ?
					SPX5_IS2_ARP_REQUEST :
					SPX5_IS2_ARP_REPLY;
		} else { /* RARP */
			value = mt.key->op == TC_ARP_OP_REQUEST ?
					SPX5_IS2_RARP_REQUEST :
					SPX5_IS2_RARP_REPLY;
		}
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ARP_OPCODE,
					    value, mask);
		if (err)
			goto out;
	}

	/* The IS2 ARP keyset does not support ARP hardware addresses */
	if (!is_zero_ether_addr(mt.mask->sha) ||
	    !is_zero_ether_addr(mt.mask->tha)) {
		err = -EINVAL;
		goto out;
	}

	/* The ARP sender/target IPs reuse the IPv4 SIP/DIP key fields */
	if (mt.mask->sip) {
		ipval = be32_to_cpu((__force __be32)mt.key->sip);
		ipmsk = be32_to_cpu((__force __be32)mt.mask->sip);

		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_SIP,
					    ipval, ipmsk);
		if (err)
			goto out;
	}

	if (mt.mask->tip) {
		ipval = be32_to_cpu((__force __be32)mt.key->tip);
		ipmsk = be32_to_cpu((__force __be32)mt.mask->tip);

		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_DIP,
					    ipval, ipmsk);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ARP);

	return 0;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "arp parse error");
	return err;
}
502 
503 static int
504 sparx5_tc_flower_handler_ip_usage(struct sparx5_tc_flower_parse_usage *st)
505 {
506 	struct flow_match_ip mt;
507 	int err = 0;
508 
509 	flow_rule_match_ip(st->frule, &mt);
510 
511 	if (mt.mask->tos) {
512 		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_TOS,
513 					    mt.key->tos,
514 					    mt.mask->tos);
515 		if (err)
516 			goto out;
517 	}
518 
519 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IP);
520 
521 	return err;
522 
523 out:
524 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_tos parse error");
525 	return err;
526 }
527 
/* Dispatch table indexed by flow dissector key id; unsupported key ids
 * are left NULL and skipped during parsing
 */
static int (*sparx5_tc_flower_usage_handlers[])(struct sparx5_tc_flower_parse_usage *st) = {
	[FLOW_DISSECTOR_KEY_ETH_ADDRS] = sparx5_tc_flower_handler_ethaddr_usage,
	[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = sparx5_tc_flower_handler_ipv4_usage,
	[FLOW_DISSECTOR_KEY_IPV6_ADDRS] = sparx5_tc_flower_handler_ipv6_usage,
	[FLOW_DISSECTOR_KEY_CONTROL] = sparx5_tc_flower_handler_control_usage,
	[FLOW_DISSECTOR_KEY_PORTS] = sparx5_tc_flower_handler_portnum_usage,
	[FLOW_DISSECTOR_KEY_BASIC] = sparx5_tc_flower_handler_basic_usage,
	[FLOW_DISSECTOR_KEY_VLAN] = sparx5_tc_flower_handler_vlan_usage,
	[FLOW_DISSECTOR_KEY_TCP] = sparx5_tc_flower_handler_tcp_usage,
	[FLOW_DISSECTOR_KEY_ARP] = sparx5_tc_flower_handler_arp_usage,
	[FLOW_DISSECTOR_KEY_IP] = sparx5_tc_flower_handler_ip_usage,
};
540 
541 static int sparx5_tc_use_dissectors(struct flow_cls_offload *fco,
542 				    struct vcap_admin *admin,
543 				    struct vcap_rule *vrule,
544 				    u16 *l3_proto)
545 {
546 	struct sparx5_tc_flower_parse_usage state = {
547 		.fco = fco,
548 		.vrule = vrule,
549 		.l3_proto = ETH_P_ALL,
550 	};
551 	int idx, err = 0;
552 
553 	state.frule = flow_cls_offload_flow_rule(fco);
554 	for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_flower_usage_handlers); ++idx) {
555 		if (!flow_rule_match_key(state.frule, idx))
556 			continue;
557 		if (!sparx5_tc_flower_usage_handlers[idx])
558 			continue;
559 		err = sparx5_tc_flower_usage_handlers[idx](&state);
560 		if (err)
561 			return err;
562 	}
563 
564 	if (state.frule->match.dissector->used_keys ^ state.used_keys) {
565 		NL_SET_ERR_MSG_MOD(fco->common.extack,
566 				   "Unsupported match item");
567 		return -ENOENT;
568 	}
569 
570 	if (l3_proto)
571 		*l3_proto = state.l3_proto;
572 	return err;
573 }
574 
575 static int sparx5_tc_flower_action_check(struct vcap_control *vctrl,
576 					 struct flow_cls_offload *fco,
577 					 struct vcap_admin *admin)
578 {
579 	struct flow_rule *rule = flow_cls_offload_flow_rule(fco);
580 	struct flow_action_entry *actent, *last_actent = NULL;
581 	struct flow_action *act = &rule->action;
582 	u64 action_mask = 0;
583 	int idx;
584 
585 	if (!flow_action_has_entries(act)) {
586 		NL_SET_ERR_MSG_MOD(fco->common.extack, "No actions");
587 		return -EINVAL;
588 	}
589 
590 	if (!flow_action_basic_hw_stats_check(act, fco->common.extack))
591 		return -EOPNOTSUPP;
592 
593 	flow_action_for_each(idx, actent, act) {
594 		if (action_mask & BIT(actent->id)) {
595 			NL_SET_ERR_MSG_MOD(fco->common.extack,
596 					   "More actions of the same type");
597 			return -EINVAL;
598 		}
599 		action_mask |= BIT(actent->id);
600 		last_actent = actent; /* Save last action for later check */
601 	}
602 
603 	/* Check that last action is a goto */
604 	if (last_actent->id != FLOW_ACTION_GOTO) {
605 		NL_SET_ERR_MSG_MOD(fco->common.extack,
606 				   "Last action must be 'goto'");
607 		return -EINVAL;
608 	}
609 
610 	/* Check if the goto chain is in the next lookup */
611 	if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
612 				 last_actent->chain_index)) {
613 		NL_SET_ERR_MSG_MOD(fco->common.extack,
614 				   "Invalid goto chain");
615 		return -EINVAL;
616 	}
617 
618 	/* Catch unsupported combinations of actions */
619 	if (action_mask & BIT(FLOW_ACTION_TRAP) &&
620 	    action_mask & BIT(FLOW_ACTION_ACCEPT)) {
621 		NL_SET_ERR_MSG_MOD(fco->common.extack,
622 				   "Cannot combine pass and trap action");
623 		return -EOPNOTSUPP;
624 	}
625 
626 	return 0;
627 }
628 
629 /* Add a rule counter action - only IS2 is considered for now */
630 static int sparx5_tc_add_rule_counter(struct vcap_admin *admin,
631 				      struct vcap_rule *vrule)
632 {
633 	int err;
634 
635 	err = vcap_rule_mod_action_u32(vrule, VCAP_AF_CNT_ID, vrule->id);
636 	if (err)
637 		return err;
638 
639 	vcap_rule_set_counter_id(vrule, vrule->id);
640 	return err;
641 }
642 
/* Collect all port keysets and apply the first of them, possibly wildcarded
 *
 * Intersects the keysets usable by the rule with the keysets configured on
 * the port. For each rule size a combined VCAP_KF_TYPE value/mask is folded
 * together in @multi; the first selected keyset is applied to @vrule here,
 * the rest are left in @multi for sparx5_tc_add_remaining_rules().
 */
static int sparx5_tc_select_protocol_keyset(struct net_device *ndev,
					    struct vcap_rule *vrule,
					    struct vcap_admin *admin,
					    u16 l3_proto,
					    struct sparx5_multiple_rules *multi)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_keyset_list portkeysetlist = {};
	enum vcap_keyfield_set portkeysets[10] = {};
	struct vcap_keyset_list matches = {};
	enum vcap_keyfield_set keysets[10];
	int idx, jdx, err = 0, count = 0;
	struct sparx5_wildcard_rule *mru;
	const struct vcap_set *kinfo;
	struct vcap_control *vctrl;

	vctrl = port->sparx5->vcap_ctrl;

	/* Find the keysets that the rule can use */
	matches.keysets = keysets;
	matches.max = ARRAY_SIZE(keysets);
	if (vcap_rule_find_keysets(vrule, &matches) == 0)
		return -EINVAL;

	/* Find the keysets that the port configuration supports */
	portkeysetlist.max = ARRAY_SIZE(portkeysets);
	portkeysetlist.keysets = portkeysets;
	err = sparx5_vcap_get_port_keyset(ndev,
					  admin, vrule->vcap_chain_id,
					  l3_proto,
					  &portkeysetlist);
	if (err)
		return err;

	/* Find the intersection of the two sets of keyset */
	for (idx = 0; idx < portkeysetlist.cnt; ++idx) {
		kinfo = vcap_keyfieldset(vctrl, admin->vtype,
					 portkeysetlist.keysets[idx]);
		if (!kinfo)
			continue;

		/* Find a port keyset that matches the required keys
		 * If there are multiple keysets then compose a type id mask
		 */
		for (jdx = 0; jdx < matches.cnt; ++jdx) {
			if (portkeysetlist.keysets[idx] != matches.keysets[jdx])
				continue;

			/* One slot per rule size (sw_per_item) */
			mru = &multi->rule[kinfo->sw_per_item];
			if (!mru->selected) {
				mru->selected = true;
				mru->keyset = portkeysetlist.keysets[idx];
				mru->value = kinfo->type_id;
			}
			/* Fold this type id into the combined value/mask */
			mru->value &= kinfo->type_id;
			mru->mask |= kinfo->type_id;
			++count;
		}
	}
	if (count == 0)
		return -EPROTO;

	/* A protocol-agnostic filter must cover all port keysets */
	if (l3_proto == ETH_P_ALL && count < portkeysetlist.cnt)
		return -ENOENT;

	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		mru = &multi->rule[idx];
		if (!mru->selected)
			continue;

		/* Align the mask to the combined value */
		mru->mask ^= mru->value;
	}

	/* Set the chosen keyset on the rule and set a wildcarded type if there
	 * are more than one keyset
	 */
	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		mru = &multi->rule[idx];
		if (!mru->selected)
			continue;

		vcap_set_rule_set_keyset(vrule, mru->keyset);
		if (count > 1)
			/* Some keysets do not have a type field */
			vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE,
					      mru->value,
					      ~mru->mask);
		mru->selected = false; /* mark as done */
		break; /* Stop here and add more rules later */
	}
	return err;
}
737 
/* Create and add one wildcard copy of @erule using the keyset in @rule
 *
 * The copy keeps only the port/lookup/type keys listed below, shares the
 * cookie of the original rule and reuses its actionset.
 */
static int sparx5_tc_add_rule_copy(struct vcap_control *vctrl,
				   struct flow_cls_offload *fco,
				   struct vcap_rule *erule,
				   struct vcap_admin *admin,
				   struct sparx5_wildcard_rule *rule)
{
	enum vcap_key_field keylist[] = {
		VCAP_KF_IF_IGR_PORT_MASK,
		VCAP_KF_IF_IGR_PORT_MASK_SEL,
		VCAP_KF_IF_IGR_PORT_MASK_RNG,
		VCAP_KF_LOOKUP_FIRST_IS,
		VCAP_KF_TYPE,
	};
	struct vcap_rule *vrule;
	int err;

	/* Add an extra rule with a special user and the new keyset */
	erule->user = VCAP_USER_TC_EXTRA;
	vrule = vcap_copy_rule(erule);
	if (IS_ERR(vrule))
		return PTR_ERR(vrule);

	/* Link the new rule to the existing rule with the cookie */
	vrule->cookie = erule->cookie;
	vcap_filter_rule_keys(vrule, keylist, ARRAY_SIZE(keylist), true);
	err = vcap_set_rule_set_keyset(vrule, rule->keyset);
	if (err) {
		pr_err("%s:%d: could not set keyset %s in rule: %u\n",
		       __func__, __LINE__,
		       vcap_keyset_name(vctrl, rule->keyset),
		       vrule->id);
		goto out;
	}

	/* Some keysets do not have a type field, so ignore return value */
	vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE, rule->value, ~rule->mask);

	err = vcap_set_rule_set_actionset(vrule, erule->actionset);
	if (err)
		goto out;

	err = sparx5_tc_add_rule_counter(admin, vrule);
	if (err)
		goto out;

	err = vcap_val_rule(vrule, ETH_P_ALL);
	if (err) {
		pr_err("%s:%d: could not validate rule: %u\n",
		       __func__, __LINE__, vrule->id);
		vcap_set_tc_exterr(fco, vrule);
		goto out;
	}
	err = vcap_add_rule(vrule);
	if (err) {
		pr_err("%s:%d: could not add rule: %u\n",
		       __func__, __LINE__, vrule->id);
		goto out;
	}
out:
	/* NOTE(review): vrule is freed on success too - assumes
	 * vcap_add_rule() keeps its own copy of the rule data; confirm
	 * against the vcap_api implementation
	 */
	vcap_free_rule(vrule);
	return err;
}
800 
801 static int sparx5_tc_add_remaining_rules(struct vcap_control *vctrl,
802 					 struct flow_cls_offload *fco,
803 					 struct vcap_rule *erule,
804 					 struct vcap_admin *admin,
805 					 struct sparx5_multiple_rules *multi)
806 {
807 	int idx, err = 0;
808 
809 	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
810 		if (!multi->rule[idx].selected)
811 			continue;
812 
813 		err = sparx5_tc_add_rule_copy(vctrl, fco, erule, admin,
814 					      &multi->rule[idx]);
815 		if (err)
816 			break;
817 	}
818 	return err;
819 }
820 
/* Add a TC flower filter as one or more VCAP rules
 *
 * Parses the flower match keys and actions into a VCAP rule, selects a
 * keyset matching the port configuration, and for protocol-agnostic
 * (ETH_P_ALL) filters adds extra rule copies for the remaining keysets.
 */
static int sparx5_tc_flower_replace(struct net_device *ndev,
				    struct flow_cls_offload *fco,
				    struct vcap_admin *admin)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct sparx5_multiple_rules multi = {};
	struct flow_action_entry *act;
	struct vcap_control *vctrl;
	struct flow_rule *frule;
	struct vcap_rule *vrule;
	u16 l3_proto;
	int err, idx;

	vctrl = port->sparx5->vcap_ctrl;

	/* Reject action lists the hardware cannot support */
	err = sparx5_tc_flower_action_check(vctrl, fco, admin);
	if (err)
		return err;

	vrule = vcap_alloc_rule(vctrl, ndev, fco->common.chain_index, VCAP_USER_TC,
				fco->common.prio, 0);
	if (IS_ERR(vrule))
		return PTR_ERR(vrule);

	/* The cookie links the TC filter to its VCAP rule(s) */
	vrule->cookie = fco->cookie;

	l3_proto = ETH_P_ALL;
	err = sparx5_tc_use_dissectors(fco, admin, vrule, &l3_proto);
	if (err)
		goto out;

	err = sparx5_tc_add_rule_counter(admin, vrule);
	if (err)
		goto out;

	/* Translate the TC actions into VCAP actions */
	frule = flow_cls_offload_flow_rule(fco);
	flow_action_for_each(idx, act, &frule->action) {
		switch (act->id) {
		case FLOW_ACTION_TRAP:
			/* Copy the frame to CPU queue 0 and discard the
			 * original forwarding decision
			 */
			err = vcap_rule_add_action_bit(vrule,
						       VCAP_AF_CPU_COPY_ENA,
						       VCAP_BIT_1);
			if (err)
				goto out;
			err = vcap_rule_add_action_u32(vrule,
						       VCAP_AF_CPU_QUEUE_NUM, 0);
			if (err)
				goto out;
			err = vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE,
						       SPX5_PMM_REPLACE_ALL);
			if (err)
				goto out;
			/* For now the actionset is hardcoded */
			err = vcap_set_rule_set_actionset(vrule,
							  VCAP_AFS_BASE_TYPE);
			if (err)
				goto out;
			break;
		case FLOW_ACTION_ACCEPT:
			/* For now the actionset is hardcoded */
			err = vcap_set_rule_set_actionset(vrule,
							  VCAP_AFS_BASE_TYPE);
			if (err)
				goto out;
			break;
		case FLOW_ACTION_GOTO:
			/* Links between VCAPs will be added later */
			break;
		default:
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "Unsupported TC action");
			err = -EOPNOTSUPP;
			goto out;
		}
	}

	err = sparx5_tc_select_protocol_keyset(ndev, vrule, admin, l3_proto,
					       &multi);
	if (err) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "No matching port keyset for filter protocol and keys");
		goto out;
	}

	/* provide the l3 protocol to guide the keyset selection */
	err = vcap_val_rule(vrule, l3_proto);
	if (err) {
		vcap_set_tc_exterr(fco, vrule);
		goto out;
	}
	err = vcap_add_rule(vrule);
	if (err)
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Could not add the filter");

	/* Protocol-agnostic filters need a rule copy per remaining keyset */
	if (l3_proto == ETH_P_ALL)
		err = sparx5_tc_add_remaining_rules(vctrl, fco, vrule, admin,
						    &multi);

out:
	/* NOTE(review): vrule is freed on success too - assumes
	 * vcap_add_rule() keeps its own copy of the rule data; confirm
	 * against the vcap_api implementation
	 */
	vcap_free_rule(vrule);
	return err;
}
924 
925 static int sparx5_tc_flower_destroy(struct net_device *ndev,
926 				    struct flow_cls_offload *fco,
927 				    struct vcap_admin *admin)
928 {
929 	struct sparx5_port *port = netdev_priv(ndev);
930 	struct vcap_control *vctrl;
931 	int err = -ENOENT, rule_id;
932 
933 	vctrl = port->sparx5->vcap_ctrl;
934 	while (true) {
935 		rule_id = vcap_lookup_rule_by_cookie(vctrl, fco->cookie);
936 		if (rule_id <= 0)
937 			break;
938 		err = vcap_del_rule(vctrl, ndev, rule_id);
939 		if (err) {
940 			pr_err("%s:%d: could not delete rule %d\n",
941 			       __func__, __LINE__, rule_id);
942 			break;
943 		}
944 	}
945 	return err;
946 }
947 
948 /* Collect packet counts from all rules with the same cookie */
949 static int sparx5_tc_rule_counter_cb(void *arg, struct vcap_rule *rule)
950 {
951 	struct sparx5_tc_rule_pkt_cnt *rinfo = arg;
952 	struct vcap_counter counter;
953 	int err = 0;
954 
955 	if (rule->cookie == rinfo->cookie) {
956 		err = vcap_rule_get_counter(rule, &counter);
957 		if (err)
958 			return err;
959 		rinfo->pkts += counter.value;
960 		/* Reset the rule counter */
961 		counter.value = 0;
962 		vcap_rule_set_counter(rule, &counter);
963 	}
964 	return err;
965 }
966 
967 static int sparx5_tc_flower_stats(struct net_device *ndev,
968 				  struct flow_cls_offload *fco,
969 				  struct vcap_admin *admin)
970 {
971 	struct sparx5_port *port = netdev_priv(ndev);
972 	struct sparx5_tc_rule_pkt_cnt rinfo = {};
973 	struct vcap_control *vctrl;
974 	ulong lastused = 0;
975 	u64 drops = 0;
976 	u32 pkts = 0;
977 	int err;
978 
979 	rinfo.cookie = fco->cookie;
980 	vctrl = port->sparx5->vcap_ctrl;
981 	err = vcap_rule_iter(vctrl, sparx5_tc_rule_counter_cb, &rinfo);
982 	if (err)
983 		return err;
984 	pkts = rinfo.pkts;
985 	flow_stats_update(&fco->stats, 0x0, pkts, drops, lastused,
986 			  FLOW_ACTION_HW_STATS_IMMEDIATE);
987 	return err;
988 }
989 
990 int sparx5_tc_flower(struct net_device *ndev, struct flow_cls_offload *fco,
991 		     bool ingress)
992 {
993 	struct sparx5_port *port = netdev_priv(ndev);
994 	struct vcap_control *vctrl;
995 	struct vcap_admin *admin;
996 	int err = -EINVAL;
997 
998 	/* Get vcap instance from the chain id */
999 	vctrl = port->sparx5->vcap_ctrl;
1000 	admin = vcap_find_admin(vctrl, fco->common.chain_index);
1001 	if (!admin) {
1002 		NL_SET_ERR_MSG_MOD(fco->common.extack, "Invalid chain");
1003 		return err;
1004 	}
1005 
1006 	switch (fco->command) {
1007 	case FLOW_CLS_REPLACE:
1008 		return sparx5_tc_flower_replace(ndev, fco, admin);
1009 	case FLOW_CLS_DESTROY:
1010 		return sparx5_tc_flower_destroy(ndev, fco, admin);
1011 	case FLOW_CLS_STATS:
1012 		return sparx5_tc_flower_stats(ndev, fco, admin);
1013 	default:
1014 		return -EOPNOTSUPP;
1015 	}
1016 }
1017