xref: /linux/net/sched/cls_matchall.c (revision 44f57d78)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_matchall.c		Match-all classifier
 *
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 */
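
/*
 * A matchall filter matches every packet on the qdisc it is attached to.
 * Typical iproute2 usage (illustrative example only):
 *
 *   tc filter add dev eth0 ingress matchall skip_sw action drop
 */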

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>

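/* matchall keeps at most one filter per classifier instance; all of its
 * state lives in this single head, published to readers via tp->root.
 */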
struct cls_mall_head {
	struct tcf_exts exts;
	struct tcf_result res;
	u32 handle;
	u32 flags;
	unsigned int in_hw_count;
	struct tc_matchall_pcnt __percpu *pf;
	struct rcu_work rwork;
};

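/* Fast path: every packet matches. Bump the per-CPU hit counter and run
 * the attached actions, unless the filter is hardware-only (skip_sw).
 */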
static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct cls_mall_head *head = rcu_dereference_bh(tp->root);

	if (unlikely(!head))
		return -1;

	if (tc_skip_sw(head->flags))
		return -1;

	*res = head->res;
	__this_cpu_inc(head->pf->rhit);
	return tcf_exts_exec(skb, &head->exts, res);
}

static int mall_init(struct tcf_proto *tp)
{
	return 0;
}

static void __mall_destroy(struct cls_mall_head *head)
{
	tcf_exts_destroy(&head->exts);
	tcf_exts_put_net(&head->exts);
	free_percpu(head->pf);
	kfree(head);
}

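/* Deferred destruction: tcf_queue_work() runs this after an RCU grace
 * period, and the RTNL lock is taken around the actual teardown.
 */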
static void mall_destroy_work(struct work_struct *work)
{
	struct cls_mall_head *head = container_of(to_rcu_work(work),
						  struct cls_mall_head,
						  rwork);
	rtnl_lock();
	__mall_destroy(head);
	rtnl_unlock();
}

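/* Ask the drivers bound to this block to remove the offloaded entry; the
 * cookie (the filter's kernel address) identifies which rule to drop.
 */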
static void mall_destroy_hw_filter(struct tcf_proto *tp,
				   struct cls_mall_head *head,
				   unsigned long cookie,
				   struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = cookie;

	tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false);
	tcf_block_offload_dec(block, &head->flags);
}

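/* Offload the filter to hardware. For skip_sw filters a hardware failure
 * is fatal; otherwise the error is ignored and the software path is used.
 */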
static int mall_replace_hw_filter(struct tcf_proto *tp,
				  struct cls_mall_head *head,
				  unsigned long cookie,
				  struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	bool skip_sw = tc_skip_sw(head->flags);
	int err;

	cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
	if (!cls_mall.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_REPLACE;
	cls_mall.cookie = cookie;

	err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts);
	if (err) {
		kfree(cls_mall.rule);
		mall_destroy_hw_filter(tp, head, cookie, NULL);
		if (skip_sw)
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
		else
			err = 0;

		return err;
	}

	err = tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, skip_sw);
	kfree(cls_mall.rule);

	if (err < 0) {
		mall_destroy_hw_filter(tp, head, cookie, NULL);
		return err;
	} else if (err > 0) {
		head->in_hw_count = err;
		tcf_block_offload_inc(block, &head->flags);
	}

	if (skip_sw && !(head->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void mall_destroy(struct tcf_proto *tp, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (!head)
		return;

	tcf_unbind_filter(tp, &head->res);

	if (!tc_skip_hw(head->flags))
		mall_destroy_hw_filter(tp, head, (unsigned long) head, extack);

	if (tcf_exts_get_net(&head->exts))
		tcf_queue_work(&head->rwork, mall_destroy_work);
	else
		__mall_destroy(head);
}

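/* Only a single filter can exist, so a lookup just compares the requested
 * handle against the one head.
 */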
static void *mall_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (head && head->handle == handle)
		return head;

	return NULL;
}

static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
	[TCA_MATCHALL_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_MATCHALL_CLASSID]		= { .type = NLA_U32 },
};

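/* Validate and attach the actions, then bind the filter to the requested
 * class if a classid was supplied.
 */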
static int mall_set_parms(struct net *net, struct tcf_proto *tp,
			  struct cls_mall_head *head,
			  unsigned long base, struct nlattr **tb,
			  struct nlattr *est, bool ovr,
			  struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &head->exts, ovr, true,
				extack);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_CLASSID]) {
		head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		tcf_bind_filter(tp, &head->res, base);
	}
	return 0;
}

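/* Create the filter. Only one filter may exist per matchall instance, so a
 * request against an already populated head fails with -EEXIST rather than
 * replacing it.
 */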
static int mall_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, bool ovr, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
	struct cls_mall_head *new;
	u32 flags = 0;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head)
		return -EEXIST;

	err = nla_parse_nested_deprecated(tb, TCA_MATCHALL_MAX,
					  tca[TCA_OPTIONS], mall_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_FLAGS]) {
		flags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
		if (!tc_flags_valid(flags))
			return -EINVAL;
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOBUFS;

	err = tcf_exts_init(&new->exts, net, TCA_MATCHALL_ACT, 0);
	if (err)
		goto err_exts_init;

	if (!handle)
		handle = 1;
	new->handle = handle;
	new->flags = flags;
	new->pf = alloc_percpu(struct tc_matchall_pcnt);
	if (!new->pf) {
		err = -ENOMEM;
		goto err_alloc_percpu;
	}

	err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr,
			     extack);
	if (err)
		goto err_set_parms;

	if (!tc_skip_hw(new->flags)) {
		err = mall_replace_hw_filter(tp, new, (unsigned long)new,
					     extack);
		if (err)
			goto err_replace_hw_filter;
	}

	if (!tc_in_hw(new->flags))
		new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	*arg = head;
	rcu_assign_pointer(tp->root, new);
	return 0;

err_replace_hw_filter:
err_set_parms:
	free_percpu(new->pf);
err_alloc_percpu:
	tcf_exts_destroy(&new->exts);
err_exts_init:
	kfree(new);
	return err;
}

static int mall_delete(struct tcf_proto *tp, void *arg, bool *last,
		       bool rtnl_held, struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

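/* With at most one filter, the walk visits at most one entry. */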
static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		      bool rtnl_held)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (arg->count < arg->skip)
		goto skip;

	if (!head)
		return;
	if (arg->fn(tp, head, arg) < 0)
		arg->stop = 1;
skip:
	arg->count++;
}

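/* Replay (add) or remove this filter's offload for a single block callback
 * that is being (un)registered, keeping in_hw_count in sync.
 */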
static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	int err;

	if (tc_skip_hw(head->flags))
		return 0;

	cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
	if (!cls_mall.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = add ?
		TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = (unsigned long)head;

	err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts);
	if (err) {
		kfree(cls_mall.rule);
		if (add && tc_skip_sw(head->flags)) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
			return err;
		}
		return 0;
	}

	err = cb(TC_SETUP_CLSMATCHALL, &cls_mall, cb_priv);
	kfree(cls_mall.rule);

	if (err) {
		if (add && tc_skip_sw(head->flags))
			return err;
		return 0;
	}

	tc_cls_offload_cnt_update(block, &head->in_hw_count, &head->flags, add);

	return 0;
}

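/* Fetch hardware counters via TC_CLSMATCHALL_STATS and fold them into the
 * software action statistics before dumping.
 */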
static void mall_stats_hw_filter(struct tcf_proto *tp,
				 struct cls_mall_head *head,
				 unsigned long cookie)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, NULL);
	cls_mall.command = TC_CLSMATCHALL_STATS;
	cls_mall.cookie = cookie;

	tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false);

	tcf_exts_stats_update(&head->exts, cls_mall.stats.bytes,
			      cls_mall.stats.pkts, cls_mall.stats.lastused);
}

static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tc_matchall_pcnt gpf = {};
	struct cls_mall_head *head = fh;
	struct nlattr *nest;
	int cpu;

	if (!head)
		return skb->len;

	if (!tc_skip_hw(head->flags))
		mall_stats_hw_filter(tp, head, (unsigned long)head);

	t->tcm_handle = head->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (head->res.classid &&
	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
		goto nla_put_failure;

	if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags))
		goto nla_put_failure;

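	/* Fold the per-CPU hit counters into a single value for the dump. */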
	for_each_possible_cpu(cpu) {
		struct tc_matchall_pcnt *pf = per_cpu_ptr(head->pf, cpu);

		gpf.rhit += pf->rhit;
	}

	if (nla_put_64bit(skb, TCA_MATCHALL_PCNT,
			  sizeof(struct tc_matchall_pcnt),
			  &gpf, TCA_MATCHALL_PAD))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void mall_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_mall_head *head = fh;

	if (head && head->res.classid == classid)
		head->res.class = cl;
}

static struct tcf_proto_ops cls_mall_ops __read_mostly = {
	.kind		= "matchall",
	.classify	= mall_classify,
	.init		= mall_init,
	.destroy	= mall_destroy,
	.get		= mall_get,
	.change		= mall_change,
	.delete		= mall_delete,
	.walk		= mall_walk,
	.reoffload	= mall_reoffload,
	.dump		= mall_dump,
	.bind_class	= mall_bind_class,
	.owner		= THIS_MODULE,
};

static int __init cls_mall_init(void)
{
	return register_tcf_proto_ops(&cls_mall_ops);
}

static void __exit cls_mall_exit(void)
{
	unregister_tcf_proto_ops(&cls_mall_ops);
}

module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");