// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */

#include "prestera.h"
#include "prestera_acl.h"
#include "prestera_flow.h"
#include "prestera_flower.h"

struct prestera_flower_template {
	struct prestera_acl_ruleset *ruleset;
	struct list_head list;
	u32 chain_index;
};

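/* Drop the ruleset reference taken when the template was created and
 * free the template entry itself.
 */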
static void
prestera_flower_template_free(struct prestera_flower_template *template)
{
	prestera_acl_ruleset_put(template->ruleset);
	list_del(&template->list);
	kfree(template);
}

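/* Release every template kept on the flow block, putting the ruleset
 * references taken at template create time.
 */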
void prestera_flower_template_cleanup(struct prestera_flow_block *block)
{
	struct prestera_flower_template *template, *tmp;

	/* Put the references to all rulesets kept by template create. */
	list_for_each_entry_safe(template, tmp, &block->template_list, list)
		prestera_flower_template_free(template);
}

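/* Translate FLOW_ACTION_GOTO (e.g. "tc ... action goto chain 2",
 * illustrative only) into a jump to the ruleset backing the target
 * chain; only forward jumps are accepted.
 */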
static int
prestera_flower_parse_goto_action(struct prestera_flow_block *block,
				  struct prestera_acl_rule *rule,
				  u32 chain_index,
				  const struct flow_action_entry *act)
{
	struct prestera_acl_ruleset *ruleset;

	if (act->chain_index <= chain_index)
		/* we can jump only forward */
		return -EINVAL;

	if (rule->re_arg.jump.valid)
		return -EEXIST;

	ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
					   act->chain_index);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule->re_arg.jump.valid = 1;
	rule->re_arg.jump.i.index = prestera_acl_ruleset_index_get(ruleset);

	rule->jump_ruleset = ruleset;

	return 0;
}

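/* Fill rule->re_arg from the flower action list: set up a counter first
 * when delayed HW stats are requested, then record each supported action
 * (accept, drop, trap, police, goto) at most once.
 */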
static int prestera_flower_parse_actions(struct prestera_flow_block *block,
					 struct prestera_acl_rule *rule,
					 struct flow_action *flow_action,
					 u32 chain_index,
					 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int err, i;

	/* whole struct (rule->re_arg) must be initialized with 0 */
	if (!flow_action_has_entries(flow_action))
		return 0;

	if (!flow_action_mixed_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

	act = flow_action_first_entry_get(flow_action);
	if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) {
		/* Nothing to do */
	} else if (act->hw_stats & FLOW_ACTION_HW_STATS_DELAYED) {
		/* setup counter first */
		rule->re_arg.count.valid = true;
		err = prestera_acl_chain_to_client(chain_index,
						   &rule->re_arg.count.client);
		if (err)
			return err;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			if (rule->re_arg.accept.valid)
				return -EEXIST;

			rule->re_arg.accept.valid = 1;
			break;
		case FLOW_ACTION_DROP:
			if (rule->re_arg.drop.valid)
				return -EEXIST;

			rule->re_arg.drop.valid = 1;
			break;
		case FLOW_ACTION_TRAP:
			if (rule->re_arg.trap.valid)
				return -EEXIST;

			rule->re_arg.trap.valid = 1;
			break;
		case FLOW_ACTION_POLICE:
			if (rule->re_arg.police.valid)
				return -EEXIST;

			rule->re_arg.police.valid = 1;
			rule->re_arg.police.rate =
				act->police.rate_bytes_ps;
			rule->re_arg.police.burst = act->police.burst;
			rule->re_arg.police.ingress = true;
			break;
		case FLOW_ACTION_GOTO:
			err = prestera_flower_parse_goto_action(block, rule,
								chain_index,
								act);
			if (err)
				return err;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			pr_err("Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

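/* Match on the ingress port: resolve the ifindex from the meta key to a
 * prestera port and encode its hw_id/dev_id into the rule key and mask.
 */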
static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
				      struct flow_cls_offload *f,
				      struct prestera_flow_block *block)
{
	struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
	struct prestera_acl_match *r_match = &rule->re_key.match;
	struct prestera_port *port;
	struct net_device *ingress_dev;
	struct flow_match_meta match;
	__be16 key, mask;

	flow_rule_match_meta(f_rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(f->common.extack,
				   "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(block->net,
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(f->common.extack,
				   "Can't find specified ingress port to match on");
		return -EINVAL;
	}

	if (!prestera_netdev_check(ingress_dev)) {
		NL_SET_ERR_MSG_MOD(f->common.extack,
				   "Can't match on switchdev ingress port");
		return -EINVAL;
	}
	port = netdev_priv(ingress_dev);

	mask = htons(0x1FFF);
	key = htons(port->hw_id);
	rule_match_set(r_match->key, SYS_PORT, key);
	rule_match_set(r_match->mask, SYS_PORT, mask);

	mask = htons(0x1FF);
	key = htons(port->dev_id);
	rule_match_set(r_match->key, SYS_DEV, key);
	rule_match_set(r_match->mask, SYS_DEV, mask);

	return 0;
}

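/* Translate a flower classifier offload request into a prestera ACL
 * rule: reject unsupported dissector keys, convert the supported matches
 * and then parse the actions.
 */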
static int prestera_flower_parse(struct prestera_flow_block *block,
				 struct prestera_acl_rule *rule,
				 struct flow_cls_offload *f)
{
	struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = f_rule->match.dissector;
	struct prestera_acl_match *r_match = &rule->re_key.match;
	__be16 n_proto_mask = 0;
	__be16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ICMP) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	prestera_acl_rule_priority_set(rule, f->common.prio);

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_META)) {
		err = prestera_flower_parse_meta(rule, f, block);
		if (err)
			return err;
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(f_rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(f_rule, &match);
		n_proto_key = match.key->n_proto;
		n_proto_mask = match.mask->n_proto;

		if (ntohs(match.key->n_proto) == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}

		rule_match_set(r_match->key, ETH_TYPE, n_proto_key);
		rule_match_set(r_match->mask, ETH_TYPE, n_proto_mask);

		rule_match_set(r_match->key, IP_PROTO, match.key->ip_proto);
		rule_match_set(r_match->mask, IP_PROTO, match.mask->ip_proto);
		ip_proto = match.key->ip_proto;
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(f_rule, &match);

		/* DA key, mask */
		rule_match_set_n(r_match->key,
				 ETH_DMAC_0, &match.key->dst[0], 4);
		rule_match_set_n(r_match->key,
				 ETH_DMAC_1, &match.key->dst[4], 2);

		rule_match_set_n(r_match->mask,
				 ETH_DMAC_0, &match.mask->dst[0], 4);
		rule_match_set_n(r_match->mask,
				 ETH_DMAC_1, &match.mask->dst[4], 2);

		/* SA key, mask */
		rule_match_set_n(r_match->key,
				 ETH_SMAC_0, &match.key->src[0], 4);
		rule_match_set_n(r_match->key,
				 ETH_SMAC_1, &match.key->src[4], 2);

		rule_match_set_n(r_match->mask,
				 ETH_SMAC_0, &match.mask->src[0], 4);
		rule_match_set_n(r_match->mask,
				 ETH_SMAC_1, &match.mask->src[4], 2);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(f_rule, &match);

		rule_match_set(r_match->key, IP_SRC, match.key->src);
		rule_match_set(r_match->mask, IP_SRC, match.mask->src);

		rule_match_set(r_match->key, IP_DST, match.key->dst);
		rule_match_set(r_match->mask, IP_DST, match.mask->dst);
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
			NL_SET_ERR_MSG_MOD
			    (f->common.extack,
			     "Only UDP and TCP keys are supported");
			return -EINVAL;
		}

		flow_rule_match_ports(f_rule, &match);

		rule_match_set(r_match->key, L4_PORT_SRC, match.key->src);
		rule_match_set(r_match->mask, L4_PORT_SRC, match.mask->src);

		rule_match_set(r_match->key, L4_PORT_DST, match.key->dst);
		rule_match_set(r_match->mask, L4_PORT_DST, match.mask->dst);
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(f_rule, &match);

		if (match.mask->vlan_id != 0) {
			__be16 key = cpu_to_be16(match.key->vlan_id);
			__be16 mask = cpu_to_be16(match.mask->vlan_id);

			rule_match_set(r_match->key, VLAN_ID, key);
			rule_match_set(r_match->mask, VLAN_ID, mask);
		}

		rule_match_set(r_match->key, VLAN_TPID, match.key->vlan_tpid);
		rule_match_set(r_match->mask, VLAN_TPID, match.mask->vlan_tpid);
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_match_icmp match;

		flow_rule_match_icmp(f_rule, &match);

		rule_match_set(r_match->key, ICMP_TYPE, match.key->type);
		rule_match_set(r_match->mask, ICMP_TYPE, match.mask->type);

		rule_match_set(r_match->key, ICMP_CODE, match.key->code);
		rule_match_set(r_match->mask, ICMP_CODE, match.mask->code);
	}

	return prestera_flower_parse_actions(block, rule, &f->rule->action,
					     f->common.chain_index,
					     f->common.extack);
}

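/* Offload a flower filter add/replace request, e.g. (illustrative only)
 * "tc filter add dev <port> ingress flower skip_sw ip_proto tcp
 *  dst_port 80 action drop": get the chain's ruleset, build the rule,
 * make sure the ruleset is offloaded and add the rule to hardware.
 */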
int prestera_flower_replace(struct prestera_flow_block *block,
			    struct flow_cls_offload *f)
{
	struct prestera_acl_ruleset *ruleset;
	struct prestera_acl *acl = block->sw->acl;
	struct prestera_acl_rule *rule;
	int err;

	ruleset = prestera_acl_ruleset_get(acl, block, f->common.chain_index);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	/* increments the ruleset reference */
	rule = prestera_acl_rule_create(ruleset, f->cookie,
					f->common.chain_index);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	err = prestera_flower_parse(block, rule, f);
	if (err)
		goto err_rule_add;

	if (!prestera_acl_ruleset_is_offload(ruleset)) {
		err = prestera_acl_ruleset_offload(ruleset);
		if (err)
			goto err_ruleset_offload;
	}

	err = prestera_acl_rule_add(block->sw, rule);
	if (err)
		goto err_rule_add;

	prestera_acl_ruleset_put(ruleset);
	return 0;

err_ruleset_offload:
err_rule_add:
	prestera_acl_rule_destroy(rule);
err_rule_create:
	prestera_acl_ruleset_put(ruleset);
	return err;
}

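/* Tear down the hardware rule behind a flower filter, if the chain's
 * ruleset and the rule (looked up by cookie) are still present.
 */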
void prestera_flower_destroy(struct prestera_flow_block *block,
			     struct flow_cls_offload *f)
{
	struct prestera_acl_ruleset *ruleset;
	struct prestera_acl_rule *rule;

	ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
					      f->common.chain_index);
	if (IS_ERR(ruleset))
		return;

	rule = prestera_acl_rule_lookup(ruleset, f->cookie);
	if (rule) {
		prestera_acl_rule_del(block->sw, rule);
		prestera_acl_rule_destroy(rule);
	}
	prestera_acl_ruleset_put(ruleset);
}

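/* Handle a chain template: parse it like a rule to build a keymask,
 * install that keymask on the chain's ruleset and keep a ruleset
 * reference on the block until the template is destroyed.
 */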
int prestera_flower_tmplt_create(struct prestera_flow_block *block,
				 struct flow_cls_offload *f)
{
	struct prestera_flower_template *template;
	struct prestera_acl_ruleset *ruleset;
	struct prestera_acl_rule rule;
	int err;

	memset(&rule, 0, sizeof(rule));
	err = prestera_flower_parse(block, &rule, f);
	if (err)
		return err;

	template = kmalloc(sizeof(*template), GFP_KERNEL);
	if (!template) {
		err = -ENOMEM;
		goto err_malloc;
	}

	prestera_acl_rule_keymask_pcl_id_set(&rule, 0);
	ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
					   f->common.chain_index);
	if (IS_ERR_OR_NULL(ruleset)) {
		err = -EINVAL;
		goto err_ruleset_get;
	}

	/* preserve keymask/template to this ruleset */
	prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask);

	/* Ignore the error: a template operation cannot be rejected, so
	 * keep the reference to the ruleset so that rules can be added to
	 * it later. If offload fails here, the ruleset will be offloaded
	 * again when a rule is added. It is also unlikely that the ruleset
	 * is already offloaded at this stage.
	 */
	prestera_acl_ruleset_offload(ruleset);

	/* keep the reference to the ruleset */
	template->ruleset = ruleset;
	template->chain_index = f->common.chain_index;
	list_add_rcu(&template->list, &block->template_list);
	return 0;

err_ruleset_get:
	kfree(template);
err_malloc:
	NL_SET_ERR_MSG_MOD(f->common.extack, "Create chain template failed");
	return err;
}

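/* Free the template created for this chain, putting the ruleset
 * reference taken at template create time.
 */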
void prestera_flower_tmplt_destroy(struct prestera_flow_block *block,
				   struct flow_cls_offload *f)
{
	struct prestera_flower_template *template, *tmp;

	list_for_each_entry_safe(template, tmp, &block->template_list, list)
		if (template->chain_index == f->common.chain_index) {
			/* put the reference to the ruleset kept in create */
			prestera_flower_template_free(template);
			return;
		}
}

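/* Report rule counters back to the flower core: look the rule up by its
 * cookie and update the offload stats with packets, bytes and lastuse.
 */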
int prestera_flower_stats(struct prestera_flow_block *block,
			  struct flow_cls_offload *f)
{
	struct prestera_acl_ruleset *ruleset;
	struct prestera_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
					      f->common.chain_index);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = prestera_acl_rule_lookup(ruleset, f->cookie);
	if (!rule) {
		err = -EINVAL;
		goto err_rule_get_stats;
	}

	err = prestera_acl_rule_get_stats(block->sw->acl, rule, &packets,
					  &bytes, &lastuse);
	if (err)
		goto err_rule_get_stats;

	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
			  FLOW_ACTION_HW_STATS_DELAYED);

err_rule_get_stats:
	prestera_acl_ruleset_put(ruleset);
	return err;
}