// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

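/* Translate the flower actions into mlxsw ACL rule actions. A count
 * action is always appended first so that the rule can report
 * statistics. Returns 0 on success or a negative errno, setting
 * extack on failure.
 */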
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_block *block,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct flow_action *flow_action,
					 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int mirror_act_count = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return 0;

	/* Count action is inserted first */
	err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
	if (err)
		return err;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
				return err;
			}
			break;
		case FLOW_ACTION_DROP:
			err = mlxsw_sp_acl_rulei_act_drop(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
				return err;
			}
			break;
		case FLOW_ACTION_TRAP:
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
				return err;
			}
			break;
		case FLOW_ACTION_GOTO: {
			u32 chain_index = act->chain_index;
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
				return err;
			}
			}
			break;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			if (mlxsw_sp_acl_block_is_egress_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
				return -EOPNOTSUPP;
			}

			/* Forbid the block containing this rulei from being
			 * bound to egress in the future.
			 */
			rulei->egress_bind_blocker = 1;

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index, extack);
			if (err)
				return err;

			out_dev = act->dev;
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev, extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_MIRRED: {
			struct net_device *out_dev = act->dev;

			if (mirror_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple mirror actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
							    block, out_dev,
							    extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;

			return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							   act->id, vid,
							   proto, prio, extack);
			}
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

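/* Match on the ingress port carried in the meta dissector key. Only an
 * exact ifindex match on a port that belongs to the same mlxsw device
 * as the block can be offloaded; the ifindex is translated into the
 * device's local port number.
 */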
static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
				      struct flow_cls_offload *f,
				      struct mlxsw_sp_acl_block *block)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(block->net,
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't find specified ingress port to match on");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(ingress_dev)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on non-mlxsw ingress port");
		return -EINVAL;
	}

	mlxsw_sp_port = netdev_priv(ingress_dev);
	if (mlxsw_sp_port->mlxsw_sp != block->mlxsw_sp) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on a port from different device");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei,
				       MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
				       mlxsw_sp_port->local_port,
				       0xFFFFFFFF);
	return 0;
}

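/* Copy the IPv4 source and destination address key/mask pairs into the
 * rule as 32-bit flexible-key elements.
 */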
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv4_addrs match;

	flow_rule_match_ipv4_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       (char *) &match.key->src,
				       (char *) &match.mask->src, 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       (char *) &match.key->dst,
				       (char *) &match.mask->dst, 4);
}

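/* Copy the IPv6 source and destination addresses into the rule. Each
 * 128-bit address is split into four 32-bit flexible-key elements,
 * most significant word first.
 */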
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv6_addrs match;

	flow_rule_match_ipv6_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
				       &match.key->src.s6_addr[0x0],
				       &match.mask->src.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
				       &match.key->src.s6_addr[0x4],
				       &match.mask->src.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
				       &match.key->src.s6_addr[0x8],
				       &match.mask->src.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       &match.key->src.s6_addr[0xC],
				       &match.mask->src.s6_addr[0xC], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
				       &match.key->dst.s6_addr[0x0],
				       &match.mask->dst.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
				       &match.key->dst.s6_addr[0x4],
				       &match.mask->dst.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
				       &match.key->dst.s6_addr[0x8],
				       &match.mask->dst.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       &match.key->dst.s6_addr[0xC],
				       &match.mask->dst.s6_addr[0xC], 4);
}

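/* Match on L4 source and destination ports. Port keys are only valid
 * when the rule also matches on TCP or UDP as the IP protocol.
 */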
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f,
				       u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ports match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	flow_rule_match_ports(rule, &match);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(match.key->dst),
				       ntohs(match.mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(match.key->src),
				       ntohs(match.mask->src));
	return 0;
}

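/* Match on TCP flags. Matching on the reserved flag bits (mask
 * htons(0x0E00)) is rejected, since those cannot be offloaded.
 */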
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct flow_cls_offload *f,
				     u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_tcp match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	flow_rule_match_tcp(rule, &match);

	if (match.mask->flags & htons(0x0E00)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP flags match not supported on reserved bits");
		dev_err(mlxsw_sp->bus_info->dev, "TCP flags match not supported on reserved bits\n");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(match.key->flags),
				       ntohs(match.mask->flags));
	return 0;
}

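/* Match on IP header fields: TTL and the TOS byte, which is split into
 * its ECN (low two bits) and DSCP (high six bits) components.
 */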
static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct flow_cls_offload *f,
				    u16 n_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ip match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	flow_rule_match_ip(rule, &match);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       match.key->ttl, match.mask->ttl);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       match.key->tos & 0x3,
				       match.mask->tos & 0x3);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       match.key->tos >> 2,
				       match.mask->tos >> 2);

	return 0;
}

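/* Main dispatch for flower rule parsing: reject unsupported dissector
 * keys up front, then translate each used key and finally the actions
 * into the mlxsw ACL rule info.
 */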
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	err = mlxsw_sp_flower_parse_meta(rulei, f, block);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = match.key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       match.key->ip_proto,
					       match.mask->ip_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       match.key->dst,
					       match.mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       match.key->dst + 2,
					       match.mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       match.key->src,
					       match.mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       match.key->src + 2,
					       match.mask->src + 2, 4);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (mlxsw_sp_acl_block_is_egress_bound(block)) {
			NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
			return -EOPNOTSUPP;
		}

		/* Forbid the block containing this rulei from being
		 * bound to egress in the future.
		 */
		rulei->egress_bind_blocker = 1;

		if (match.mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       match.key->vlan_id,
						       match.mask->vlan_id);
		if (match.mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       match.key->vlan_priority,
						       match.mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f,
				       n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei,
					     &f->rule->action,
					     f->common.extack);
}

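/* Offload a flower classifier rule: get the ruleset for the chain,
 * create and parse the rule, commit it to hardware, then release the
 * ruleset reference taken for the lookup.
 */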
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL,
					f->common.extack);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

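/* Remove an offloaded flower rule: find the ruleset for the chain,
 * look the rule up by its cookie and tear it down if it exists.
 */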
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_block *block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

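/* Report hardware packet/byte counters and last-use time for an
 * offloaded rule back to the flower classifier.
 */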
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_block *block,
			  struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &lastuse);
	if (err)
		goto err_rule_get_stats;

	flow_stats_update(&f->stats, bytes, packets, lastuse);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

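/* Create a ruleset for a flower template. The template is parsed only
 * to discover which key elements it uses; the ruleset reference taken
 * here is deliberately kept until the template is destroyed.
 */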
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info rulei;
	int err;

	memset(&rulei, 0, sizeof(rulei));
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
	if (err)
		return err;
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER,
					   &rulei.values.elusage);

	/* keep the reference to the ruleset */
	return PTR_ERR_OR_ZERO(ruleset);
}

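/* Destroy a template ruleset: put both the reference taken by the
 * lookup below and the one kept by mlxsw_sp_flower_tmplt_create().
 */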
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_block *block,
				   struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;
	/* put the reference to the ruleset kept in create */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}