// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */

#include <linux/hash.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/math64.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_port.h"

#define NFP_FL_QOS_UPDATE		msecs_to_jiffies(1000)
#define NFP_FL_QOS_PPS  BIT(15)
#define NFP_FL_QOS_METER  BIT(10)

struct nfp_police_cfg_head {
	__be32 flags_opts;
	union {
		__be32 meter_id;
		__be32 port;
	};
};

enum NFP_FL_QOS_TYPES {
	NFP_FL_QOS_TYPE_BPS,
	NFP_FL_QOS_TYPE_PPS,
	NFP_FL_QOS_TYPE_MAX,
};

/* Police cmsg for configuring a trTCM traffic conditioner (8W/32B)
 * See RFC 2698 for more details.
 * ----------------------------------------------------------------
 *    3                   2                   1
 *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |             Reserved          |p|         Reserved            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                          Port Ingress                         |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                        Token Bucket Peak                      |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     Token Bucket Committed                    |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                         Peak Burst Size                       |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      Committed Burst Size                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      Peak Information Rate                    |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                    Committed Information Rate                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * Word[0] (Flag options):
 * [15] p(pps) 1 for pps, 0 for bps
 *
 * Meter control message
 *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 * +-------------------------------+-+---+-----+-+---------+-+---+-+
 * |            Reserved           |p| Y |TYPE |E|TSHFV    |P| PC|R|
 * +-------------------------------+-+---+-----+-+---------+-+---+-+
 * |                            meter ID                           |
 * +-------------------------------+-------------------------------+
 *
 */
struct nfp_police_config {
	struct nfp_police_cfg_head head;
	__be32 bkt_tkn_p;
	__be32 bkt_tkn_c;
	__be32 pbs;
	__be32 cbs;
	__be32 pir;
	__be32 cir;
};

struct nfp_police_stats_reply {
	struct nfp_police_cfg_head head;
	__be64 pass_bytes;
	__be64 pass_pkts;
	__be64 drop_bytes;
	__be64 drop_pkts;
};

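/* Build and send a single police config cmsg to the firmware. When
 * @ingress is true the policer is keyed on the representor port id,
 * otherwise the meter flag is set and @id is used as a meter id. The
 * same @rate and @burst are programmed into both the peak and committed
 * buckets of the trTCM.
 */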
int nfp_flower_offload_one_police(struct nfp_app *app, bool ingress,
				  bool pps, u32 id, u32 rate, u32 burst)
{
	struct nfp_police_config *config;
	struct sk_buff *skb;

	skb = nfp_flower_cmsg_alloc(app, sizeof(struct nfp_police_config),
				    NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	config = nfp_flower_cmsg_get_data(skb);
	memset(config, 0, sizeof(struct nfp_police_config));
	if (pps)
		config->head.flags_opts |= cpu_to_be32(NFP_FL_QOS_PPS);
	if (!ingress)
		config->head.flags_opts |= cpu_to_be32(NFP_FL_QOS_METER);

	if (ingress)
		config->head.port = cpu_to_be32(id);
	else
		config->head.meter_id = cpu_to_be32(id);

	config->bkt_tkn_p = cpu_to_be32(burst);
	config->bkt_tkn_c = cpu_to_be32(burst);
	config->pbs = cpu_to_be32(burst);
	config->cbs = cpu_to_be32(burst);
	config->pir = cpu_to_be32(rate);
	config->cir = cpu_to_be32(rate);
	nfp_ctrl_tx(app->ctrl, skb);

	return 0;
}

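/* Check that a tc police action can be offloaded: exceed must be drop,
 * conform must be pipe or ok (and if ok, the police action must be the
 * last entry), and peakrate/avrate/overhead must not be set.
 */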
static int nfp_policer_validate(const struct flow_action *action,
				const struct flow_action_entry *act,
				struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}

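/* Install an ingress rate limiter on a VF representor for a matchall
 * classifier. The police action(s) are validated, programmed into the
 * firmware, and the periodic stats polling work is started when the
 * first rate limiter is added.
 */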
static int
nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
				struct tc_cls_matchall_offload *flow,
				struct netlink_ext_ack *extack)
{
	struct flow_action_entry *paction = &flow->rule->action.entries[0];
	u32 action_num = flow->rule->action.num_entries;
	struct nfp_flower_priv *fl_priv = app->priv;
	struct flow_action_entry *action = NULL;
	struct nfp_flower_repr_priv *repr_priv;
	u32 netdev_port_id, i;
	struct nfp_repr *repr;
	bool pps_support;
	u32 bps_num = 0;
	u32 pps_num = 0;
	u32 burst;
	bool pps;
	u64 rate;
	int err;

	if (!nfp_netdev_is_nfp_repr(netdev)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
		return -EOPNOTSUPP;
	}
	repr = netdev_priv(netdev);
	repr_priv = repr->app_priv;
	netdev_port_id = nfp_repr_get_port_id(netdev);
	pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS);

	if (repr_priv->block_shared) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on shared blocks");
		return -EOPNOTSUPP;
	}

	if (repr->port->type != NFP_PORT_VF_PORT) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on non-VF ports");
		return -EOPNOTSUPP;
	}

	if (pps_support) {
		if (action_num > 2 || action_num == 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: qos rate limit offload only support action number 1 or 2");
			return -EOPNOTSUPP;
		}
	} else {
		if (!flow_offload_has_one_action(&flow->rule->action)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: qos rate limit offload requires a single action");
			return -EOPNOTSUPP;
		}
	}

	if (flow->common.prio != 1) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires highest priority");
		return -EOPNOTSUPP;
	}

	for (i = 0; i < action_num; i++) {
		action = paction + i;
		if (action->id != FLOW_ACTION_POLICE) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: qos rate limit offload requires police action");
			return -EOPNOTSUPP;
		}

		err = nfp_policer_validate(&flow->rule->action, action, extack);
		if (err)
			return err;

		if (action->police.rate_bytes_ps > 0) {
			if (bps_num++) {
				NL_SET_ERR_MSG_MOD(extack,
						   "unsupported offload: qos rate limit offload only support one BPS action");
				return -EOPNOTSUPP;
			}
		}
		if (action->police.rate_pkt_ps > 0) {
			if (!pps_support) {
				NL_SET_ERR_MSG_MOD(extack,
						   "unsupported offload: FW does not support PPS action");
				return -EOPNOTSUPP;
			}
			if (pps_num++) {
				NL_SET_ERR_MSG_MOD(extack,
						   "unsupported offload: qos rate limit offload only support one PPS action");
				return -EOPNOTSUPP;
			}
		}
	}

	for (i = 0; i < action_num; i++) {
		/* Set QoS data for this interface */
		action = paction + i;
		if (action->police.rate_bytes_ps > 0) {
			rate = action->police.rate_bytes_ps;
			burst = action->police.burst;
		} else if (action->police.rate_pkt_ps > 0) {
			rate = action->police.rate_pkt_ps;
			burst = action->police.burst_pkt;
		} else {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: qos rate limit is not BPS or PPS");
			continue;
		}

		if (rate != 0) {
			pps = false;
			if (action->police.rate_pkt_ps > 0)
				pps = true;
			nfp_flower_offload_one_police(repr->app, true,
						      pps, netdev_port_id,
						      rate, burst);
		}
	}
	repr_priv->qos_table.netdev_port_id = netdev_port_id;
	fl_priv->qos_rate_limiters++;
	if (fl_priv->qos_rate_limiters == 1)
		schedule_delayed_work(&fl_priv->qos_stats_work,
				      NFP_FL_QOS_UPDATE);

	return 0;
}

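/* Remove the rate limiter state for a VF representor. A delete cmsg is
 * sent for each supported QoS type and the stats polling work is stopped
 * once no rate limiters remain.
 */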
static int
nfp_flower_remove_rate_limiter(struct nfp_app *app, struct net_device *netdev,
			       struct tc_cls_matchall_offload *flow,
			       struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_police_config *config;
	u32 netdev_port_id, i;
	struct nfp_repr *repr;
	struct sk_buff *skb;
	bool pps_support;

	if (!nfp_netdev_is_nfp_repr(netdev)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
		return -EOPNOTSUPP;
	}
	repr = netdev_priv(netdev);

	netdev_port_id = nfp_repr_get_port_id(netdev);
	repr_priv = repr->app_priv;
	pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS);

	if (!repr_priv->qos_table.netdev_port_id) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot remove qos entry that does not exist");
		return -EOPNOTSUPP;
	}

	memset(&repr_priv->qos_table, 0, sizeof(struct nfp_fl_qos));
	fl_priv->qos_rate_limiters--;
	if (!fl_priv->qos_rate_limiters)
		cancel_delayed_work_sync(&fl_priv->qos_stats_work);
	for (i = 0; i < NFP_FL_QOS_TYPE_MAX; i++) {
		if (i == NFP_FL_QOS_TYPE_PPS && !pps_support)
			break;
		/* 0:bps 1:pps
		 * Clear QoS data for this interface.
		 * There is no need to check if a specific QOS_TYPE was
		 * configured as the firmware handles clearing a QoS entry
		 * safely, even if it wasn't explicitly added.
		 */
		skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
					    NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL);
		if (!skb)
			return -ENOMEM;

		config = nfp_flower_cmsg_get_data(skb);
		memset(config, 0, sizeof(struct nfp_police_config));
		if (i == NFP_FL_QOS_TYPE_PPS)
			config->head.flags_opts = cpu_to_be32(NFP_FL_QOS_PPS);
		config->head.port = cpu_to_be32(netdev_port_id);
		nfp_ctrl_tx(repr->app->ctrl, skb);
	}

	return 0;
}

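/* Handle a QoS stats reply from the firmware. Meter replies are handed
 * to nfp_act_stats_reply(); port replies update the cached counters that
 * back the TC_CLSMATCHALL_STATS callback.
 */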
void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_police_stats_reply *msg;
	struct nfp_stat_pair *curr_stats;
	struct nfp_stat_pair *prev_stats;
	struct net_device *netdev;
	struct nfp_repr *repr;
	u32 netdev_port_id;

	msg = nfp_flower_cmsg_get_data(skb);
	if (be32_to_cpu(msg->head.flags_opts) & NFP_FL_QOS_METER)
		return nfp_act_stats_reply(app, msg);

	netdev_port_id = be32_to_cpu(msg->head.port);
	rcu_read_lock();
	netdev = nfp_app_dev_get(app, netdev_port_id, NULL);
	if (!netdev)
		goto exit_unlock_rcu;

	repr = netdev_priv(netdev);
	repr_priv = repr->app_priv;
	curr_stats = &repr_priv->qos_table.curr_stats;
	prev_stats = &repr_priv->qos_table.prev_stats;

	spin_lock_bh(&fl_priv->qos_stats_lock);
	curr_stats->pkts = be64_to_cpu(msg->pass_pkts) +
			   be64_to_cpu(msg->drop_pkts);
	curr_stats->bytes = be64_to_cpu(msg->pass_bytes) +
			    be64_to_cpu(msg->drop_bytes);

	if (!repr_priv->qos_table.last_update) {
		prev_stats->pkts = curr_stats->pkts;
		prev_stats->bytes = curr_stats->bytes;
	}

	repr_priv->qos_table.last_update = jiffies;
	spin_unlock_bh(&fl_priv->qos_stats_lock);

exit_unlock_rcu:
	rcu_read_unlock();
}

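/* Request QoS stats from the firmware for a single port (ingress) or
 * meter id (egress); replies are processed by nfp_flower_stats_rlim_reply().
 */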
static void
nfp_flower_stats_rlim_request(struct nfp_flower_priv *fl_priv,
			      u32 id, bool ingress)
{
	struct nfp_police_cfg_head *head;
	struct sk_buff *skb;

	skb = nfp_flower_cmsg_alloc(fl_priv->app,
				    sizeof(struct nfp_police_cfg_head),
				    NFP_FLOWER_CMSG_TYPE_QOS_STATS,
				    GFP_ATOMIC);
	if (!skb)
		return;
	head = nfp_flower_cmsg_get_data(skb);

	memset(head, 0, sizeof(struct nfp_police_cfg_head));
	if (ingress) {
		head->port = cpu_to_be32(id);
	} else {
		head->flags_opts = cpu_to_be32(NFP_FL_QOS_METER);
		head->meter_id = cpu_to_be32(id);
	}

	nfp_ctrl_tx(fl_priv->app->ctrl, skb);
}

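/* Request stats for every VF representor that has a rate limiter configured. */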
static void
nfp_flower_stats_rlim_request_all(struct nfp_flower_priv *fl_priv)
{
	struct nfp_reprs *repr_set;
	int i;

	rcu_read_lock();
	repr_set = rcu_dereference(fl_priv->app->reprs[NFP_REPR_TYPE_VF]);
	if (!repr_set)
		goto exit_unlock_rcu;

	for (i = 0; i < repr_set->num_reprs; i++) {
		struct net_device *netdev;

		netdev = rcu_dereference(repr_set->reprs[i]);
		if (netdev) {
			struct nfp_repr *priv = netdev_priv(netdev);
			struct nfp_flower_repr_priv *repr_priv;
			u32 netdev_port_id;

			repr_priv = priv->app_priv;
			netdev_port_id = repr_priv->qos_table.netdev_port_id;
			if (!netdev_port_id)
				continue;

			nfp_flower_stats_rlim_request(fl_priv,
						      netdev_port_id, true);
		}
	}

exit_unlock_rcu:
	rcu_read_unlock();
}

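/* Periodic worker: poll the firmware for rate limiter and meter stats,
 * then re-arm itself.
 */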
static void update_stats_cache(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct nfp_flower_priv *fl_priv;

	delayed_work = to_delayed_work(work);
	fl_priv = container_of(delayed_work, struct nfp_flower_priv,
			       qos_stats_work);

	nfp_flower_stats_rlim_request_all(fl_priv);
	nfp_flower_stats_meter_request_all(fl_priv);

	schedule_delayed_work(&fl_priv->qos_stats_work, NFP_FL_QOS_UPDATE);
}

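/* Report rate limiter stats to TC as the delta against the previously
 * reported snapshot.
 */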
static int
nfp_flower_stats_rate_limiter(struct nfp_app *app, struct net_device *netdev,
			      struct tc_cls_matchall_offload *flow,
			      struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_stat_pair *curr_stats;
	struct nfp_stat_pair *prev_stats;
	u64 diff_bytes, diff_pkts;
	struct nfp_repr *repr;

	if (!nfp_netdev_is_nfp_repr(netdev)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
		return -EOPNOTSUPP;
	}
	repr = netdev_priv(netdev);

	repr_priv = repr->app_priv;
	if (!repr_priv->qos_table.netdev_port_id) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot find qos entry for stats update");
		return -EOPNOTSUPP;
	}

	spin_lock_bh(&fl_priv->qos_stats_lock);
	curr_stats = &repr_priv->qos_table.curr_stats;
	prev_stats = &repr_priv->qos_table.prev_stats;
	diff_pkts = curr_stats->pkts - prev_stats->pkts;
	diff_bytes = curr_stats->bytes - prev_stats->bytes;
	prev_stats->pkts = curr_stats->pkts;
	prev_stats->bytes = curr_stats->bytes;
	spin_unlock_bh(&fl_priv->qos_stats_lock);

	flow_stats_update(&flow->stats, diff_bytes, diff_pkts, 0,
			  repr_priv->qos_table.last_update,
			  FLOW_ACTION_HW_STATS_DELAYED);
	return 0;
}

void nfp_flower_qos_init(struct nfp_app *app)
{
	struct nfp_flower_priv *fl_priv = app->priv;

	spin_lock_init(&fl_priv->qos_stats_lock);
	mutex_init(&fl_priv->meter_stats_lock);
	nfp_init_meter_table(app);

	INIT_DELAYED_WORK(&fl_priv->qos_stats_work, &update_stats_cache);
}

void nfp_flower_qos_cleanup(struct nfp_app *app)
{
	struct nfp_flower_priv *fl_priv = app->priv;

	cancel_delayed_work_sync(&fl_priv->qos_stats_work);
}

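/* Entry point for tc matchall offload, used for VF rate limiting. */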
int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
				 struct tc_cls_matchall_offload *flow)
{
	struct netlink_ext_ack *extack = flow->common.extack;
	struct nfp_flower_priv *fl_priv = app->priv;

	if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload");
		return -EOPNOTSUPP;
	}

	switch (flow->command) {
	case TC_CLSMATCHALL_REPLACE:
		return nfp_flower_install_rate_limiter(app, netdev, flow,
						       extack);
	case TC_CLSMATCHALL_DESTROY:
		return nfp_flower_remove_rate_limiter(app, netdev, flow,
						      extack);
	case TC_CLSMATCHALL_STATS:
		return nfp_flower_stats_rate_limiter(app, netdev, flow,
						     extack);
	default:
		return -EOPNOTSUPP;
	}
}

/* offload tc action, currently only for tc police */

static const struct rhashtable_params stats_meter_table_params = {
	.key_offset	= offsetof(struct nfp_meter_entry, meter_id),
	.head_offset	= offsetof(struct nfp_meter_entry, ht_node),
	.key_len	= sizeof(u32),
};

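/* Look up the meter entry for @meter_id, or return NULL if none exists. */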
struct nfp_meter_entry *
nfp_flower_search_meter_entry(struct nfp_app *app, u32 meter_id)
{
	struct nfp_flower_priv *priv = app->priv;

	return rhashtable_lookup_fast(&priv->meter_table, &meter_id,
				      stats_meter_table_params);
}

static struct nfp_meter_entry *
nfp_flower_add_meter_entry(struct nfp_app *app, u32 meter_id)
{
	struct nfp_meter_entry *meter_entry = NULL;
	struct nfp_flower_priv *priv = app->priv;

	meter_entry = rhashtable_lookup_fast(&priv->meter_table,
					     &meter_id,
					     stats_meter_table_params);
	if (meter_entry)
		return meter_entry;

	meter_entry = kzalloc(sizeof(*meter_entry), GFP_KERNEL);
	if (!meter_entry)
		return NULL;

	meter_entry->meter_id = meter_id;
	meter_entry->used = jiffies;
	if (rhashtable_insert_fast(&priv->meter_table, &meter_entry->ht_node,
				   stats_meter_table_params)) {
		kfree(meter_entry);
		return NULL;
	}

	priv->qos_rate_limiters++;
	if (priv->qos_rate_limiters == 1)
		schedule_delayed_work(&priv->qos_stats_work,
				      NFP_FL_QOS_UPDATE);

	return meter_entry;
}

static void nfp_flower_del_meter_entry(struct nfp_app *app, u32 meter_id)
{
	struct nfp_meter_entry *meter_entry = NULL;
	struct nfp_flower_priv *priv = app->priv;

	meter_entry = rhashtable_lookup_fast(&priv->meter_table, &meter_id,
					     stats_meter_table_params);
	if (!meter_entry)
		return;

	rhashtable_remove_fast(&priv->meter_table,
			       &meter_entry->ht_node,
			       stats_meter_table_params);
	kfree(meter_entry);
	priv->qos_rate_limiters--;
	if (!priv->qos_rate_limiters)
		cancel_delayed_work_sync(&priv->qos_stats_work);
}

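/* Add or delete the software state for a meter under meter_stats_lock.
 * For NFP_METER_ADD the rate and burst from the police action are cached
 * so later delete and stats requests know whether the meter is bps or pps.
 */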
int nfp_flower_setup_meter_entry(struct nfp_app *app,
				 const struct flow_action_entry *action,
				 enum nfp_meter_op op,
				 u32 meter_id)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_meter_entry *meter_entry = NULL;
	int err = 0;

	mutex_lock(&fl_priv->meter_stats_lock);

	switch (op) {
	case NFP_METER_DEL:
		nfp_flower_del_meter_entry(app, meter_id);
		goto exit_unlock;
	case NFP_METER_ADD:
		meter_entry = nfp_flower_add_meter_entry(app, meter_id);
		break;
	default:
		err = -EOPNOTSUPP;
		goto exit_unlock;
	}

	if (!meter_entry) {
		err = -ENOMEM;
		goto exit_unlock;
	}

	if (action->police.rate_bytes_ps > 0) {
		meter_entry->bps = true;
		meter_entry->rate = action->police.rate_bytes_ps;
		meter_entry->burst = action->police.burst;
	} else {
		meter_entry->bps = false;
		meter_entry->rate = action->police.rate_pkt_ps;
		meter_entry->burst = action->police.burst_pkt;
	}

exit_unlock:
	mutex_unlock(&fl_priv->meter_stats_lock);
	return err;
}

int nfp_init_meter_table(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;

	return rhashtable_init(&priv->meter_table, &stats_meter_table_params);
}

void
nfp_flower_stats_meter_request_all(struct nfp_flower_priv *fl_priv)
{
	struct nfp_meter_entry *meter_entry = NULL;
	struct rhashtable_iter iter;

	mutex_lock(&fl_priv->meter_stats_lock);
	rhashtable_walk_enter(&fl_priv->meter_table, &iter);
	rhashtable_walk_start(&iter);

	while ((meter_entry = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(meter_entry))
			continue;
		nfp_flower_stats_rlim_request(fl_priv,
					      meter_entry->meter_id, false);
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
	mutex_unlock(&fl_priv->meter_stats_lock);
}

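/* Offload standalone tc police actions as firmware meters, one meter per
 * police entry keyed on its hw_index. Fails with -EOPNOTSUPP if no meter
 * could be installed.
 */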
static int
nfp_act_install_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
			struct netlink_ext_ack *extack)
{
	struct flow_action_entry *paction = &fl_act->action.entries[0];
	u32 action_num = fl_act->action.num_entries;
	struct nfp_flower_priv *fl_priv = app->priv;
	struct flow_action_entry *action = NULL;
	u32 burst, i, meter_id;
	bool pps_support, pps;
	bool add = false;
	u64 rate;

	pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS);

	for (i = 0; i < action_num; i++) {
		/* Set QoS data for this action */
		action = paction + i;
		if (action->id != FLOW_ACTION_POLICE) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: qos rate limit offload requires police action");
			continue;
		}
		if (action->police.rate_bytes_ps > 0) {
			rate = action->police.rate_bytes_ps;
			burst = action->police.burst;
		} else if (action->police.rate_pkt_ps > 0 && pps_support) {
			rate = action->police.rate_pkt_ps;
			burst = action->police.burst_pkt;
		} else {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: unsupported qos rate limit");
			continue;
		}

		if (rate != 0) {
			meter_id = action->hw_index;
			if (nfp_flower_setup_meter_entry(app, action, NFP_METER_ADD, meter_id))
				continue;

			pps = false;
			if (action->police.rate_pkt_ps > 0)
				pps = true;
			nfp_flower_offload_one_police(app, false, pps, meter_id,
						      rate, burst);
			add = true;
		}
	}

	return add ? 0 : -EOPNOTSUPP;
}

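/* Tear down the firmware meter and the cached software state for a
 * police action.
 */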
static int
nfp_act_remove_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
		       struct netlink_ext_ack *extack)
{
	struct nfp_meter_entry *meter_entry = NULL;
	struct nfp_police_config *config;
	struct sk_buff *skb;
	u32 meter_id;
	bool pps;

	/* Delete QoS data for this action */
	if (fl_act->id != FLOW_ACTION_POLICE) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: qos rate limit offload requires police action");
		return -EOPNOTSUPP;
	}

	meter_id = fl_act->index;
	meter_entry = nfp_flower_search_meter_entry(app, meter_id);
	if (!meter_entry) {
		NL_SET_ERR_MSG_MOD(extack,
				   "no meter entry when delete the action index.");
		return -ENOENT;
	}
	pps = !meter_entry->bps;

	skb = nfp_flower_cmsg_alloc(app, sizeof(struct nfp_police_config),
				    NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	config = nfp_flower_cmsg_get_data(skb);
	memset(config, 0, sizeof(struct nfp_police_config));
	config->head.flags_opts = cpu_to_be32(NFP_FL_QOS_METER);
	config->head.meter_id = cpu_to_be32(meter_id);
	if (pps)
		config->head.flags_opts |= cpu_to_be32(NFP_FL_QOS_PPS);

	nfp_ctrl_tx(app->ctrl, skb);
	nfp_flower_setup_meter_entry(app, NULL, NFP_METER_DEL, meter_id);

	return 0;
}

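/* Update the cached counters for a meter from a firmware stats reply. */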
void
nfp_act_stats_reply(struct nfp_app *app, void *pmsg)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_meter_entry *meter_entry = NULL;
	struct nfp_police_stats_reply *msg = pmsg;
	u32 meter_id;

	meter_id = be32_to_cpu(msg->head.meter_id);
	mutex_lock(&fl_priv->meter_stats_lock);

	meter_entry = nfp_flower_search_meter_entry(app, meter_id);
	if (!meter_entry)
		goto exit_unlock;

	meter_entry->stats.curr.pkts = be64_to_cpu(msg->pass_pkts) +
				       be64_to_cpu(msg->drop_pkts);
	meter_entry->stats.curr.bytes = be64_to_cpu(msg->pass_bytes) +
					be64_to_cpu(msg->drop_bytes);
	meter_entry->stats.curr.drops = be64_to_cpu(msg->drop_pkts);
	if (!meter_entry->stats.update) {
		meter_entry->stats.prev.pkts = meter_entry->stats.curr.pkts;
		meter_entry->stats.prev.bytes = meter_entry->stats.curr.bytes;
		meter_entry->stats.prev.drops = meter_entry->stats.curr.drops;
	}

	meter_entry->stats.update = jiffies;

exit_unlock:
	mutex_unlock(&fl_priv->meter_stats_lock);
}

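/* Report meter stats to TC as the delta since the previous query and
 * roll the snapshot forward.
 */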
static int
nfp_act_stats_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
		      struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_meter_entry *meter_entry = NULL;
	u64 diff_bytes, diff_pkts, diff_drops;
	int err = 0;

	if (fl_act->id != FLOW_ACTION_POLICE) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: qos rate limit offload requires police action");
		return -EOPNOTSUPP;
	}

	mutex_lock(&fl_priv->meter_stats_lock);
	meter_entry = nfp_flower_search_meter_entry(app, fl_act->index);
	if (!meter_entry) {
		err = -ENOENT;
		goto exit_unlock;
	}
	diff_pkts = meter_entry->stats.curr.pkts > meter_entry->stats.prev.pkts ?
		    meter_entry->stats.curr.pkts - meter_entry->stats.prev.pkts : 0;
	diff_bytes = meter_entry->stats.curr.bytes > meter_entry->stats.prev.bytes ?
		     meter_entry->stats.curr.bytes - meter_entry->stats.prev.bytes : 0;
	diff_drops = meter_entry->stats.curr.drops > meter_entry->stats.prev.drops ?
		     meter_entry->stats.curr.drops - meter_entry->stats.prev.drops : 0;

	flow_stats_update(&fl_act->stats, diff_bytes, diff_pkts, diff_drops,
			  meter_entry->stats.update,
			  FLOW_ACTION_HW_STATS_DELAYED);

	meter_entry->stats.prev.pkts = meter_entry->stats.curr.pkts;
	meter_entry->stats.prev.bytes = meter_entry->stats.curr.bytes;
	meter_entry->stats.prev.drops = meter_entry->stats.curr.drops;

exit_unlock:
	mutex_unlock(&fl_priv->meter_stats_lock);
	return err;
}

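/* Entry point for standalone tc action offload, currently tc police only. */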
int nfp_setup_tc_act_offload(struct nfp_app *app,
			     struct flow_offload_action *fl_act)
{
	struct netlink_ext_ack *extack = fl_act->extack;
	struct nfp_flower_priv *fl_priv = app->priv;

	if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_METER))
		return -EOPNOTSUPP;

	switch (fl_act->command) {
	case FLOW_ACT_REPLACE:
		return nfp_act_install_actions(app, fl_act, extack);
	case FLOW_ACT_DESTROY:
		return nfp_act_remove_actions(app, fl_act, extack);
	case FLOW_ACT_STATS:
		return nfp_act_stats_actions(app, fl_act, extack);
	default:
		return -EOPNOTSUPP;
	}
}