/* xref: /linux/net/sched/act_police.c (revision 1e525507) */
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/act_police.c	Input police filter
4  *
5  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6  * 		J Hadi Salim (action changes)
7  */
8 
9 #include <linux/module.h>
10 #include <linux/types.h>
11 #include <linux/kernel.h>
12 #include <linux/string.h>
13 #include <linux/errno.h>
14 #include <linux/skbuff.h>
15 #include <linux/rtnetlink.h>
16 #include <linux/init.h>
17 #include <linux/slab.h>
18 #include <net/act_api.h>
19 #include <net/gso.h>
20 #include <net/netlink.h>
21 #include <net/pkt_cls.h>
22 #include <net/tc_act/tc_police.h>
23 #include <net/tc_wrapper.h>
24 
25 /* Each policer is serialized by its individual spinlock */
26 
27 static struct tc_action_ops act_police_ops;
28 
/* Netlink attribute policy for TCA_POLICE_* attributes: the classic
 * rate tables carry a fixed-size blob (TC_RTAB_SIZE), the 64-bit
 * rate/peakrate attributes are plain u64, and the packet-per-second
 * rate/burst attributes must be non-zero (.min = 1).
 */
static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
	[TCA_POLICE_RATE]	= { .len = TC_RTAB_SIZE },
	[TCA_POLICE_PEAKRATE]	= { .len = TC_RTAB_SIZE },
	[TCA_POLICE_AVRATE]	= { .type = NLA_U32 },
	[TCA_POLICE_RESULT]	= { .type = NLA_U32 },
	[TCA_POLICE_RATE64]     = { .type = NLA_U64 },
	[TCA_POLICE_PEAKRATE64] = { .type = NLA_U64 },
	[TCA_POLICE_PKTRATE64]  = { .type = NLA_U64, .min = 1 },
	[TCA_POLICE_PKTBURST64] = { .type = NLA_U64, .min = 1 },
};
39 
/* Create a new police action or replace the parameters of an existing
 * one.  A fully initialised tcf_police_params block is built off to the
 * side and then published to the datapath via rcu_replace_pointer()
 * under tcf_lock; the previous block (if any) is freed after an RCU
 * grace period.
 *
 * Returns ACT_P_CREATED for a newly created action, ACT_P_BOUND when
 * binding to an existing one, 0 on a successful replace, or a negative
 * errno.
 */
static int tcf_police_init(struct net *net, struct nlattr *nla,
			       struct nlattr *est, struct tc_action **a,
			       struct tcf_proto *tp, u32 flags,
			       struct netlink_ext_ack *extack)
{
	int ret = 0, tcfp_result = TC_ACT_OK, err, size;
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_POLICE_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_police *parm;
	struct tcf_police *police;
	struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
	struct tc_action_net *tn = net_generic(net, act_police_ops.net_id);
	struct tcf_police_params *new;
	bool exists = false;
	u32 index;
	u64 rate64, prate64;
	u64 pps, ppsburst;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_POLICE_MAX, nla,
					  police_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_POLICE_TBF] == NULL)
		return -EINVAL;
	/* Accept both the current and the legacy (compat) TBF layout. */
	size = nla_len(tb[TCA_POLICE_TBF]);
	if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
		return -EINVAL;

	parm = nla_data(tb[TCA_POLICE_TBF]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	/* Non-zero err means an action with this index already exists. */
	exists = err;
	if (exists && bind)
		return ACT_P_BOUND;

	if (!exists) {
		ret = tcf_idr_create(tn, index, NULL, a,
				     &act_police_ops, bind, true, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
		/* tcfp_lock guards the token-bucket state; set it up once
		 * at creation time.
		 */
		spin_lock_init(&(to_police(*a)->tcfp_lock));
	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}
	/* Validate the control action (and resolve goto_chain if used). */
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	police = to_police(*a);
	if (parm->rate.rate) {
		/* Classic byte-rate (and optional peak-rate) tables. */
		err = -ENOMEM;
		R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE], NULL);
		if (R_tab == NULL)
			goto failure;

		if (parm->peakrate.rate) {
			P_tab = qdisc_get_rtab(&parm->peakrate,
					       tb[TCA_POLICE_PEAKRATE], NULL);
			if (P_tab == NULL)
				goto failure;
		}
	}

	if (est) {
		err = gen_replace_estimator(&police->tcf_bstats,
					    police->common.cpu_bstats,
					    &police->tcf_rate_est,
					    &police->tcf_lock,
					    false, est);
		if (err)
			goto failure;
	} else if (tb[TCA_POLICE_AVRATE] &&
		   (ret == ACT_P_CREATED ||
		    !gen_estimator_active(&police->tcf_rate_est))) {
		/* AVRATE policing needs a running rate estimator. */
		err = -EINVAL;
		goto failure;
	}

	if (tb[TCA_POLICE_RESULT]) {
		tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
		if (TC_ACT_EXT_CMP(tcfp_result, TC_ACT_GOTO_CHAIN)) {
			NL_SET_ERR_MSG(extack,
				       "goto chain not allowed on fallback");
			err = -EINVAL;
			goto failure;
		}
	}

	/* pps rate and pps burst must be given together ... */
	if ((tb[TCA_POLICE_PKTRATE64] && !tb[TCA_POLICE_PKTBURST64]) ||
	    (!tb[TCA_POLICE_PKTRATE64] && tb[TCA_POLICE_PKTBURST64])) {
		NL_SET_ERR_MSG(extack,
			       "Both or neither packet-per-second burst and rate must be provided");
		err = -EINVAL;
		goto failure;
	}

	/* ... and are mutually exclusive with a byte rate. */
	if (tb[TCA_POLICE_PKTRATE64] && R_tab) {
		NL_SET_ERR_MSG(extack,
			       "packet-per-second and byte-per-second rate limits not allowed in same action");
		err = -EINVAL;
		goto failure;
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (unlikely(!new)) {
		err = -ENOMEM;
		goto failure;
	}

	/* No failure allowed after this point */
	new->tcfp_result = tcfp_result;
	new->tcfp_mtu = parm->mtu;
	if (!new->tcfp_mtu) {
		/* Default MTU: unlimited, or 255 maximum-size cells when a
		 * rate table is present.
		 */
		new->tcfp_mtu = ~0;
		if (R_tab)
			new->tcfp_mtu = 255 << R_tab->rate.cell_log;
	}
	if (R_tab) {
		new->rate_present = true;
		/* The 64-bit attribute, when present, overrides the 32-bit
		 * rate embedded in the table.
		 */
		rate64 = tb[TCA_POLICE_RATE64] ?
			 nla_get_u64(tb[TCA_POLICE_RATE64]) : 0;
		psched_ratecfg_precompute(&new->rate, &R_tab->rate, rate64);
		qdisc_put_rtab(R_tab);
	} else {
		new->rate_present = false;
	}
	if (P_tab) {
		new->peak_present = true;
		prate64 = tb[TCA_POLICE_PEAKRATE64] ?
			  nla_get_u64(tb[TCA_POLICE_PEAKRATE64]) : 0;
		psched_ratecfg_precompute(&new->peak, &P_tab->rate, prate64);
		qdisc_put_rtab(P_tab);
	} else {
		new->peak_present = false;
	}

	new->tcfp_burst = PSCHED_TICKS2NS(parm->burst);
	if (new->peak_present)
		new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak,
							 new->tcfp_mtu);

	if (tb[TCA_POLICE_AVRATE])
		new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);

	if (tb[TCA_POLICE_PKTRATE64]) {
		pps = nla_get_u64(tb[TCA_POLICE_PKTRATE64]);
		ppsburst = nla_get_u64(tb[TCA_POLICE_PKTBURST64]);
		new->pps_present = true;
		new->tcfp_pkt_burst = PSCHED_TICKS2NS(ppsburst);
		psched_ppscfg_precompute(&new->ppsrate, pps);
	}

	/* Reset the bucket to a full burst and publish the parameters.
	 * tcfp_lock nests inside tcf_lock.
	 */
	spin_lock_bh(&police->tcf_lock);
	spin_lock_bh(&police->tcfp_lock);
	police->tcfp_t_c = ktime_get_ns();
	police->tcfp_toks = new->tcfp_burst;
	if (new->peak_present)
		police->tcfp_ptoks = new->tcfp_mtu_ptoks;
	spin_unlock_bh(&police->tcfp_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	/* After this swap, "new" holds the OLD parameter block (or NULL). */
	new = rcu_replace_pointer(police->params,
				  new,
				  lockdep_is_held(&police->tcf_lock));
	spin_unlock_bh(&police->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (new)
		kfree_rcu(new, rcu);

	return ret;

failure:
	qdisc_put_rtab(P_tab);
	qdisc_put_rtab(R_tab);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}
232 
233 static bool tcf_police_mtu_check(struct sk_buff *skb, u32 limit)
234 {
235 	u32 len;
236 
237 	if (skb_is_gso(skb))
238 		return skb_gso_validate_mac_len(skb, limit);
239 
240 	len = qdisc_pkt_len(skb);
241 	if (skb_at_tc_ingress(skb))
242 		len += skb->mac_len;
243 
244 	return len <= limit;
245 }
246 
/* Datapath handler: police one packet.  Runs under RCU-BH (parameters
 * are read with rcu_dereference_bh()); the token-bucket state is
 * serialized by tcfp_lock.  Returns the configured conform result
 * (tcfp_result) when the packet conforms, otherwise the action's
 * tcf_action as the exceed verdict.
 */
TC_INDIRECT_SCOPE int tcf_police_act(struct sk_buff *skb,
				     const struct tc_action *a,
				     struct tcf_result *res)
{
	struct tcf_police *police = to_police(a);
	s64 now, toks, ppstoks = 0, ptoks = 0;
	struct tcf_police_params *p;
	int ret;

	tcf_lastuse_update(&police->tcf_tm);
	bstats_update(this_cpu_ptr(police->common.cpu_bstats), skb);

	/* Default (exceed) verdict unless the packet conforms below. */
	ret = READ_ONCE(police->tcf_action);
	p = rcu_dereference_bh(police->params);

	if (p->tcfp_ewma_rate) {
		struct gnet_stats_rate_est64 sample;

		/* Average-rate policing: overlimit when the estimator
		 * cannot be read or the estimated bps reaches the bound.
		 */
		if (!gen_estimator_read(&police->tcf_rate_est, &sample) ||
		    sample.bps >= p->tcfp_ewma_rate)
			goto inc_overlimits;
	}

	if (tcf_police_mtu_check(skb, p->tcfp_mtu)) {
		if (!p->rate_present && !p->pps_present) {
			/* MTU-only policer: packet conforms. */
			ret = p->tcfp_result;
			goto end;
		}

		now = ktime_get_ns();
		spin_lock_bh(&police->tcfp_lock);
		/* Credit the elapsed time, capped at one full burst. */
		toks = min_t(s64, now - police->tcfp_t_c, p->tcfp_burst);
		if (p->peak_present) {
			ptoks = toks + police->tcfp_ptoks;
			if (ptoks > p->tcfp_mtu_ptoks)
				ptoks = p->tcfp_mtu_ptoks;
			ptoks -= (s64)psched_l2t_ns(&p->peak,
						    qdisc_pkt_len(skb));
		}
		if (p->rate_present) {
			toks += police->tcfp_toks;
			if (toks > p->tcfp_burst)
				toks = p->tcfp_burst;
			toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb));
		} else if (p->pps_present) {
			ppstoks = min_t(s64, now - police->tcfp_t_c, p->tcfp_pkt_burst);
			ppstoks += police->tcfp_pkttoks;
			if (ppstoks > p->tcfp_pkt_burst)
				ppstoks = p->tcfp_pkt_burst;
			ppstoks -= (s64)psched_pkt2t_ns(&p->ppsrate, 1);
		}
		/* Sign-bit OR: >= 0 iff none of the buckets went negative,
		 * i.e. the packet conforms to every configured limit.
		 */
		if ((toks | ptoks | ppstoks) >= 0) {
			police->tcfp_t_c = now;
			police->tcfp_toks = toks;
			police->tcfp_ptoks = ptoks;
			police->tcfp_pkttoks = ppstoks;
			spin_unlock_bh(&police->tcfp_lock);
			ret = p->tcfp_result;
			goto inc_drops;
		}
		spin_unlock_bh(&police->tcfp_lock);
	}

inc_overlimits:
	qstats_overlimit_inc(this_cpu_ptr(police->common.cpu_qstats));
inc_drops:
	/* Count a drop only when the final verdict is SHOT. */
	if (ret == TC_ACT_SHOT)
		qstats_drop_inc(this_cpu_ptr(police->common.cpu_qstats));
end:
	return ret;
}
318 
319 static void tcf_police_cleanup(struct tc_action *a)
320 {
321 	struct tcf_police *police = to_police(a);
322 	struct tcf_police_params *p;
323 
324 	p = rcu_dereference_protected(police->params, 1);
325 	if (p)
326 		kfree_rcu(p, rcu);
327 }
328 
329 static void tcf_police_stats_update(struct tc_action *a,
330 				    u64 bytes, u64 packets, u64 drops,
331 				    u64 lastuse, bool hw)
332 {
333 	struct tcf_police *police = to_police(a);
334 	struct tcf_t *tm = &police->tcf_tm;
335 
336 	tcf_action_update_stats(a, bytes, packets, drops, hw);
337 	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
338 }
339 
/* Dump the police action configuration as netlink attributes.
 * tcf_lock is held across the whole dump so the parameter block and
 * the control action are read consistently.  Returns skb->len on
 * success, or -1 after trimming any partially written attributes.
 */
static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
			       int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_police *police = to_police(a);
	struct tcf_police_params *p;
	struct tc_police opt = {
		.index = police->tcf_index,
		.refcnt = refcount_read(&police->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&police->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&police->tcf_lock);
	opt.action = police->tcf_action;
	p = rcu_dereference_protected(police->params,
				      lockdep_is_held(&police->tcf_lock));
	opt.mtu = p->tcfp_mtu;
	opt.burst = PSCHED_NS2TICKS(p->tcfp_burst);
	if (p->rate_present) {
		psched_ratecfg_getrate(&opt.rate, &p->rate);
		/* Rates too large for the legacy 32-bit field are also
		 * exported via the 64-bit attribute.
		 */
		if ((p->rate.rate_bytes_ps >= (1ULL << 32)) &&
		    nla_put_u64_64bit(skb, TCA_POLICE_RATE64,
				      p->rate.rate_bytes_ps,
				      TCA_POLICE_PAD))
			goto nla_put_failure;
	}
	if (p->peak_present) {
		psched_ratecfg_getrate(&opt.peakrate, &p->peak);
		if ((p->peak.rate_bytes_ps >= (1ULL << 32)) &&
		    nla_put_u64_64bit(skb, TCA_POLICE_PEAKRATE64,
				      p->peak.rate_bytes_ps,
				      TCA_POLICE_PAD))
			goto nla_put_failure;
	}
	if (p->pps_present) {
		if (nla_put_u64_64bit(skb, TCA_POLICE_PKTRATE64,
				      p->ppsrate.rate_pkts_ps,
				      TCA_POLICE_PAD))
			goto nla_put_failure;
		if (nla_put_u64_64bit(skb, TCA_POLICE_PKTBURST64,
				      PSCHED_NS2TICKS(p->tcfp_pkt_burst),
				      TCA_POLICE_PAD))
			goto nla_put_failure;
	}
	if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
		goto nla_put_failure;
	/* Optional attributes are emitted only when non-zero. */
	if (p->tcfp_result &&
	    nla_put_u32(skb, TCA_POLICE_RESULT, p->tcfp_result))
		goto nla_put_failure;
	if (p->tcfp_ewma_rate &&
	    nla_put_u32(skb, TCA_POLICE_AVRATE, p->tcfp_ewma_rate))
		goto nla_put_failure;

	tcf_tm_dump(&t, &police->tcf_tm);
	if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&police->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&police->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}
406 
407 static int tcf_police_act_to_flow_act(int tc_act, u32 *extval,
408 				      struct netlink_ext_ack *extack)
409 {
410 	int act_id = -EOPNOTSUPP;
411 
412 	if (!TC_ACT_EXT_OPCODE(tc_act)) {
413 		if (tc_act == TC_ACT_OK)
414 			act_id = FLOW_ACTION_ACCEPT;
415 		else if (tc_act ==  TC_ACT_SHOT)
416 			act_id = FLOW_ACTION_DROP;
417 		else if (tc_act == TC_ACT_PIPE)
418 			act_id = FLOW_ACTION_PIPE;
419 		else if (tc_act == TC_ACT_RECLASSIFY)
420 			NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform/exceed action is \"reclassify\"");
421 		else
422 			NL_SET_ERR_MSG_MOD(extack, "Unsupported conform/exceed action offload");
423 	} else if (TC_ACT_EXT_CMP(tc_act, TC_ACT_GOTO_CHAIN)) {
424 		act_id = FLOW_ACTION_GOTO;
425 		*extval = tc_act & TC_ACT_EXT_VAL_MASK;
426 	} else if (TC_ACT_EXT_CMP(tc_act, TC_ACT_JUMP)) {
427 		act_id = FLOW_ACTION_JUMP;
428 		*extval = tc_act & TC_ACT_EXT_VAL_MASK;
429 	} else if (tc_act == TC_ACT_UNSPEC) {
430 		act_id = FLOW_ACTION_CONTINUE;
431 	} else {
432 		NL_SET_ERR_MSG_MOD(extack, "Unsupported conform/exceed action offload");
433 	}
434 
435 	return act_id;
436 }
437 
/* ->offload_act_setup handler: fill a flow_action_entry (bind) or a
 * flow_offload_action id (unbind/stats) for hardware offload of this
 * policer.  Returns 0 on success or a negative errno when a verdict
 * cannot be expressed as a flow action.
 */
static int tcf_police_offload_act_setup(struct tc_action *act, void *entry_data,
					u32 *index_inc, bool bind,
					struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;
		struct tcf_police *police = to_police(act);
		struct tcf_police_params *p;
		int act_id;

		/* NOTE(review): the lockdep annotation assumes the caller
		 * holds tcf_lock here — confirm against the offload setup
		 * call path.
		 */
		p = rcu_dereference_protected(police->params,
					      lockdep_is_held(&police->tcf_lock));

		entry->id = FLOW_ACTION_POLICE;
		entry->police.burst = tcf_police_burst(act);
		entry->police.rate_bytes_ps =
			tcf_police_rate_bytes_ps(act);
		entry->police.peakrate_bytes_ps = tcf_police_peakrate_bytes_ps(act);
		entry->police.avrate = tcf_police_tcfp_ewma_rate(act);
		entry->police.overhead = tcf_police_rate_overhead(act);
		entry->police.burst_pkt = tcf_police_burst_pkt(act);
		entry->police.rate_pkt_ps =
			tcf_police_rate_pkt_ps(act);
		entry->police.mtu = tcf_police_tcfp_mtu(act);

		/* Exceed verdict comes from the control action ... */
		act_id = tcf_police_act_to_flow_act(police->tcf_action,
						    &entry->police.exceed.extval,
						    extack);
		if (act_id < 0)
			return act_id;

		entry->police.exceed.act_id = act_id;

		/* ... conform (notexceed) verdict from tcfp_result. */
		act_id = tcf_police_act_to_flow_act(p->tcfp_result,
						    &entry->police.notexceed.extval,
						    extack);
		if (act_id < 0)
			return act_id;

		entry->police.notexceed.act_id = act_id;

		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_POLICE;
	}

	return 0;
}
488 
MODULE_AUTHOR("Alexey Kuznetsov");
MODULE_DESCRIPTION("Policing actions");
MODULE_LICENSE("GPL");

/* tc action ops: binds the "police" action kind (TCA_ID_POLICE) to the
 * handlers defined above.
 */
static struct tc_action_ops act_police_ops = {
	.kind		=	"police",
	.id		=	TCA_ID_POLICE,
	.owner		=	THIS_MODULE,
	.stats_update	=	tcf_police_stats_update,
	.act		=	tcf_police_act,
	.dump		=	tcf_police_dump,
	.init		=	tcf_police_init,
	.cleanup	=	tcf_police_cleanup,
	.offload_act_setup =	tcf_police_offload_act_setup,
	.size		=	sizeof(struct tcf_police),
};
MODULE_ALIAS_NET_ACT("police");
506 
507 static __net_init int police_init_net(struct net *net)
508 {
509 	struct tc_action_net *tn = net_generic(net, act_police_ops.net_id);
510 
511 	return tc_action_net_init(net, tn, &act_police_ops);
512 }
513 
/* Per-netns batched teardown of the police action table. */
static void __net_exit police_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_police_ops.net_id);
}
518 
/* Pernet ops: per-netns tc_action_net storage keyed by
 * act_police_ops.net_id.
 */
static struct pernet_operations police_net_ops = {
	.init = police_init_net,
	.exit_batch = police_exit_net,
	.id   = &act_police_ops.net_id,
	.size = sizeof(struct tc_action_net),
};
525 
/* Module entry: register the police action with the tc action core. */
static int __init police_init_module(void)
{
	return tcf_register_action(&act_police_ops, &police_net_ops);
}

/* Module exit: unregister the police action. */
static void __exit police_cleanup_module(void)
{
	tcf_unregister_action(&act_police_ops, &police_net_ops);
}

module_init(police_init_module);
module_exit(police_cleanup_module);
538