xref: /linux/net/sched/act_api.c (revision c6fbb759)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/act_api.c	Packet action API.
4  *
5  * Author:	Jamal Hadi Salim
6  */
7 
8 #include <linux/types.h>
9 #include <linux/kernel.h>
10 #include <linux/string.h>
11 #include <linux/errno.h>
12 #include <linux/slab.h>
13 #include <linux/skbuff.h>
14 #include <linux/init.h>
15 #include <linux/kmod.h>
16 #include <linux/err.h>
17 #include <linux/module.h>
18 #include <net/net_namespace.h>
19 #include <net/sock.h>
20 #include <net/sch_generic.h>
21 #include <net/pkt_cls.h>
22 #include <net/tc_act/tc_pedit.h>
23 #include <net/act_api.h>
24 #include <net/netlink.h>
25 #include <net/flow_offload.h>
26 
27 #ifdef CONFIG_INET
28 DEFINE_STATIC_KEY_FALSE(tcf_frag_xmit_count);
29 EXPORT_SYMBOL_GPL(tcf_frag_xmit_count);
30 #endif
31 
32 int tcf_dev_queue_xmit(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
33 {
34 #ifdef CONFIG_INET
35 	if (static_branch_unlikely(&tcf_frag_xmit_count))
36 		return sch_frag_xmit_hook(skb, xmit);
37 #endif
38 
39 	return xmit(skb);
40 }
41 EXPORT_SYMBOL_GPL(tcf_dev_queue_xmit);
42 
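/* Resolve the goto_chain destination: point the classification result at the
 * first filter of the target chain so classification restarts from there.
 */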
43 static void tcf_action_goto_chain_exec(const struct tc_action *a,
44 				       struct tcf_result *res)
45 {
46 	const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);
47 
48 	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
49 }
50 
51 static void tcf_free_cookie_rcu(struct rcu_head *p)
52 {
53 	struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);
54 
55 	kfree(cookie->data);
56 	kfree(cookie);
57 }
58 
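/* Swap in the new act_cookie and free the old one after an RCU grace period. */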
59 static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
60 				  struct tc_cookie *new_cookie)
61 {
62 	struct tc_cookie *old;
63 
64 	old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
65 	if (old)
66 		call_rcu(&old->rcu, tcf_free_cookie_rcu);
67 }
68 
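/* Validate the control action value. For TC_ACT_GOTO_CHAIN also look up the
 * destination chain and take a reference on it via *newchain.
 */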
69 int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
70 			     struct tcf_chain **newchain,
71 			     struct netlink_ext_ack *extack)
72 {
73 	int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;
74 	u32 chain_index;
75 
76 	if (!opcode)
77 		ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
78 	else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
79 		ret = 0;
80 	if (ret) {
81 		NL_SET_ERR_MSG(extack, "invalid control action");
82 		goto end;
83 	}
84 
85 	if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
86 		chain_index = action & TC_ACT_EXT_VAL_MASK;
87 		if (!tp || !newchain) {
88 			ret = -EINVAL;
89 			NL_SET_ERR_MSG(extack,
90 				       "can't goto NULL proto/chain");
91 			goto end;
92 		}
93 		*newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
94 		if (!*newchain) {
95 			ret = -ENOMEM;
96 			NL_SET_ERR_MSG(extack,
97 				       "can't allocate goto_chain");
98 		}
99 	}
100 end:
101 	return ret;
102 }
103 EXPORT_SYMBOL(tcf_action_check_ctrlact);
104 
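/* Install the control action and its goto_chain on the action. Returns the
 * previous goto_chain so the caller can release it.
 */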
105 struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
106 					 struct tcf_chain *goto_chain)
107 {
108 	a->tcfa_action = action;
109 	goto_chain = rcu_replace_pointer(a->goto_chain, goto_chain, 1);
110 	return goto_chain;
111 }
112 EXPORT_SYMBOL(tcf_action_set_ctrlact);
113 
114 /* XXX: For standalone actions, we don't need an RCU grace period either, because
115  * actions are always connected to filters and filters are already destroyed in
116  * RCU callbacks, so after an RCU grace period actions are already disconnected
117  * from filters. Readers later cannot find us.
118  */
119 static void free_tcf(struct tc_action *p)
120 {
121 	struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);
122 
123 	free_percpu(p->cpu_bstats);
124 	free_percpu(p->cpu_bstats_hw);
125 	free_percpu(p->cpu_qstats);
126 
127 	tcf_set_action_cookie(&p->act_cookie, NULL);
128 	if (chain)
129 		tcf_chain_put_by_act(chain);
130 
131 	kfree(p);
132 }
133 
134 static void offload_action_hw_count_set(struct tc_action *act,
135 					u32 hw_count)
136 {
137 	act->in_hw_count = hw_count;
138 }
139 
140 static void offload_action_hw_count_inc(struct tc_action *act,
141 					u32 hw_count)
142 {
143 	act->in_hw_count += hw_count;
144 }
145 
146 static void offload_action_hw_count_dec(struct tc_action *act,
147 					u32 hw_count)
148 {
149 	act->in_hw_count = act->in_hw_count > hw_count ?
150 			   act->in_hw_count - hw_count : 0;
151 }
152 
153 static unsigned int tcf_offload_act_num_actions_single(struct tc_action *act)
154 {
155 	if (is_tcf_pedit(act))
156 		return tcf_pedit_nkeys(act);
157 	else
158 		return 1;
159 }
160 
161 static bool tc_act_skip_hw(u32 flags)
162 {
163 	return (flags & TCA_ACT_FLAGS_SKIP_HW) ? true : false;
164 }
165 
166 static bool tc_act_skip_sw(u32 flags)
167 {
168 	return (flags & TCA_ACT_FLAGS_SKIP_SW) ? true : false;
169 }
170 
171 static bool tc_act_in_hw(struct tc_action *act)
172 {
173 	return !!act->in_hw_count;
174 }
175 
176 /* SKIP_HW and SKIP_SW are mutually exclusive flags. */
177 static bool tc_act_flags_valid(u32 flags)
178 {
179 	flags &= TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW;
180 
181 	return flags ^ (TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW);
182 }
183 
184 static int offload_action_init(struct flow_offload_action *fl_action,
185 			       struct tc_action *act,
186 			       enum offload_act_command  cmd,
187 			       struct netlink_ext_ack *extack)
188 {
189 	int err;
190 
191 	fl_action->extack = extack;
192 	fl_action->command = cmd;
193 	fl_action->index = act->tcfa_index;
194 
195 	if (act->ops->offload_act_setup) {
196 		spin_lock_bh(&act->tcfa_lock);
197 		err = act->ops->offload_act_setup(act, fl_action, NULL,
198 						  false, extack);
199 		spin_unlock_bh(&act->tcfa_lock);
200 		return err;
201 	}
202 
203 	return -EOPNOTSUPP;
204 }
205 
206 static int tcf_action_offload_cmd_ex(struct flow_offload_action *fl_act,
207 				     u32 *hw_count)
208 {
209 	int err;
210 
211 	err = flow_indr_dev_setup_offload(NULL, NULL, TC_SETUP_ACT,
212 					  fl_act, NULL, NULL);
213 	if (err < 0)
214 		return err;
215 
216 	if (hw_count)
217 		*hw_count = err;
218 
219 	return 0;
220 }
221 
222 static int tcf_action_offload_cmd_cb_ex(struct flow_offload_action *fl_act,
223 					u32 *hw_count,
224 					flow_indr_block_bind_cb_t *cb,
225 					void *cb_priv)
226 {
227 	int err;
228 
229 	err = cb(NULL, NULL, cb_priv, TC_SETUP_ACT, NULL, fl_act, NULL);
230 	if (err < 0)
231 		return err;
232 
233 	if (hw_count)
234 		*hw_count = 1;
235 
236 	return 0;
237 }
238 
239 static int tcf_action_offload_cmd(struct flow_offload_action *fl_act,
240 				  u32 *hw_count,
241 				  flow_indr_block_bind_cb_t *cb,
242 				  void *cb_priv)
243 {
244 	return cb ? tcf_action_offload_cmd_cb_ex(fl_act, hw_count,
245 						 cb, cb_priv) :
246 		    tcf_action_offload_cmd_ex(fl_act, hw_count);
247 }
248 
249 static int tcf_action_offload_add_ex(struct tc_action *action,
250 				     struct netlink_ext_ack *extack,
251 				     flow_indr_block_bind_cb_t *cb,
252 				     void *cb_priv)
253 {
254 	bool skip_sw = tc_act_skip_sw(action->tcfa_flags);
255 	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
256 		[0] = action,
257 	};
258 	struct flow_offload_action *fl_action;
259 	u32 in_hw_count = 0;
260 	int num, err = 0;
261 
262 	if (tc_act_skip_hw(action->tcfa_flags))
263 		return 0;
264 
265 	num = tcf_offload_act_num_actions_single(action);
266 	fl_action = offload_action_alloc(num);
267 	if (!fl_action)
268 		return -ENOMEM;
269 
270 	err = offload_action_init(fl_action, action, FLOW_ACT_REPLACE, extack);
271 	if (err)
272 		goto fl_err;
273 
274 	err = tc_setup_action(&fl_action->action, actions, extack);
275 	if (err) {
276 		NL_SET_ERR_MSG_MOD(extack,
277 				   "Failed to setup tc actions for offload");
278 		goto fl_err;
279 	}
280 
281 	err = tcf_action_offload_cmd(fl_action, &in_hw_count, cb, cb_priv);
282 	if (!err)
283 		cb ? offload_action_hw_count_inc(action, in_hw_count) :
284 		     offload_action_hw_count_set(action, in_hw_count);
285 
286 	if (skip_sw && !tc_act_in_hw(action))
287 		err = -EINVAL;
288 
289 	tc_cleanup_offload_action(&fl_action->action);
290 
291 fl_err:
292 	kfree(fl_action);
293 
294 	return err;
295 }
296 
297 /* offload the tc action after it is inserted */
298 static int tcf_action_offload_add(struct tc_action *action,
299 				  struct netlink_ext_ack *extack)
300 {
301 	return tcf_action_offload_add_ex(action, extack, NULL, NULL);
302 }
303 
304 int tcf_action_update_hw_stats(struct tc_action *action)
305 {
306 	struct flow_offload_action fl_act = {};
307 	int err;
308 
309 	if (!tc_act_in_hw(action))
310 		return -EOPNOTSUPP;
311 
312 	err = offload_action_init(&fl_act, action, FLOW_ACT_STATS, NULL);
313 	if (err)
314 		return err;
315 
316 	err = tcf_action_offload_cmd(&fl_act, NULL, NULL, NULL);
317 	if (!err) {
318 		preempt_disable();
319 		tcf_action_stats_update(action, fl_act.stats.bytes,
320 					fl_act.stats.pkts,
321 					fl_act.stats.drops,
322 					fl_act.stats.lastused,
323 					true);
324 		preempt_enable();
325 		action->used_hw_stats = fl_act.stats.used_hw_stats;
326 		action->used_hw_stats_valid = true;
327 	} else {
328 		return -EOPNOTSUPP;
329 	}
330 
331 	return 0;
332 }
333 EXPORT_SYMBOL(tcf_action_update_hw_stats);
334 
335 static int tcf_action_offload_del_ex(struct tc_action *action,
336 				     flow_indr_block_bind_cb_t *cb,
337 				     void *cb_priv)
338 {
339 	struct flow_offload_action fl_act = {};
340 	u32 in_hw_count = 0;
341 	int err = 0;
342 
343 	if (!tc_act_in_hw(action))
344 		return 0;
345 
346 	err = offload_action_init(&fl_act, action, FLOW_ACT_DESTROY, NULL);
347 	if (err)
348 		return err;
349 
350 	err = tcf_action_offload_cmd(&fl_act, &in_hw_count, cb, cb_priv);
351 	if (err < 0)
352 		return err;
353 
354 	if (!cb && action->in_hw_count != in_hw_count)
355 		return -EINVAL;
356 
357 	/* do not need to update hw state when deleting action */
358 	if (cb && in_hw_count)
359 		offload_action_hw_count_dec(action, in_hw_count);
360 
361 	return 0;
362 }
363 
364 static int tcf_action_offload_del(struct tc_action *action)
365 {
366 	return tcf_action_offload_del_ex(action, NULL, NULL);
367 }
368 
369 static void tcf_action_cleanup(struct tc_action *p)
370 {
371 	tcf_action_offload_del(p);
372 	if (p->ops->cleanup)
373 		p->ops->cleanup(p);
374 
375 	gen_kill_estimator(&p->tcfa_rate_est);
376 	free_tcf(p);
377 }
378 
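/* Drop a reference (and a bind reference if @bind is set). If this was the
 * last reference, remove the action from the idr, clean it up and return 1;
 * otherwise return 0.
 */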
379 static int __tcf_action_put(struct tc_action *p, bool bind)
380 {
381 	struct tcf_idrinfo *idrinfo = p->idrinfo;
382 
383 	if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
384 		if (bind)
385 			atomic_dec(&p->tcfa_bindcnt);
386 		idr_remove(&idrinfo->action_idr, p->tcfa_index);
387 		mutex_unlock(&idrinfo->lock);
388 
389 		tcf_action_cleanup(p);
390 		return 1;
391 	}
392 
393 	if (bind)
394 		atomic_dec(&p->tcfa_bindcnt);
395 
396 	return 0;
397 }
398 
399 static int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
400 {
401 	int ret = 0;
402 
403 	/* Release with strict==1 and bind==0 is only called through the act API
404 	 * interface (classifiers always bind). The only case when an action with
405 	 * a positive reference count and a zero bind count can exist is when it
406 	 * was also created with the act API (unbinding the last classifier will
407 	 * destroy the action if it was created by a classifier). So the only
408 	 * case when the bind count can change after the initial check is when
409 	 * an unbound action is destroyed by the act API while a classifier binds
410 	 * to an action with the same id concurrently. This results either in the
411 	 * creation of a new action (same behavior as before), or in reusing the
412 	 * existing action if the concurrent process increments the reference
413 	 * count before the action is deleted. Both scenarios are acceptable.
414 	 */
415 	if (p) {
416 		if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
417 			return -EPERM;
418 
419 		if (__tcf_action_put(p, bind))
420 			ret = ACT_P_DELETED;
421 	}
422 
423 	return ret;
424 }
425 
426 int tcf_idr_release(struct tc_action *a, bool bind)
427 {
428 	const struct tc_action_ops *ops = a->ops;
429 	int ret;
430 
431 	ret = __tcf_idr_release(a, bind, false);
432 	if (ret == ACT_P_DELETED)
433 		module_put(ops->owner);
434 	return ret;
435 }
436 EXPORT_SYMBOL(tcf_idr_release);
437 
438 static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
439 {
440 	struct tc_cookie *act_cookie;
441 	u32 cookie_len = 0;
442 
443 	rcu_read_lock();
444 	act_cookie = rcu_dereference(act->act_cookie);
445 
446 	if (act_cookie)
447 		cookie_len = nla_total_size(act_cookie->len);
448 	rcu_read_unlock();
449 
450 	return  nla_total_size(0) /* action number nested */
451 		+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
452 		+ cookie_len /* TCA_ACT_COOKIE */
453 		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_HW_STATS */
454 		+ nla_total_size(0) /* TCA_ACT_STATS nested */
455 		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_FLAGS */
456 		/* TCA_STATS_BASIC */
457 		+ nla_total_size_64bit(sizeof(struct gnet_stats_basic))
458 		/* TCA_STATS_PKT64 */
459 		+ nla_total_size_64bit(sizeof(u64))
460 		/* TCA_STATS_QUEUE */
461 		+ nla_total_size_64bit(sizeof(struct gnet_stats_queue))
462 		+ nla_total_size(0) /* TCA_OPTIONS nested */
463 		+ nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
464 }
465 
466 static size_t tcf_action_full_attrs_size(size_t sz)
467 {
468 	return NLMSG_HDRLEN                     /* struct nlmsghdr */
469 		+ sizeof(struct tcamsg)
470 		+ nla_total_size(0)             /* TCA_ACT_TAB nested */
471 		+ sz;
472 }
473 
474 static size_t tcf_action_fill_size(const struct tc_action *act)
475 {
476 	size_t sz = tcf_action_shared_attrs_size(act);
477 
478 	if (act->ops->get_fill_size)
479 		return act->ops->get_fill_size(act) + sz;
480 	return sz;
481 }
482 
483 static int
484 tcf_action_dump_terse(struct sk_buff *skb, struct tc_action *a, bool from_act)
485 {
486 	unsigned char *b = skb_tail_pointer(skb);
487 	struct tc_cookie *cookie;
488 
489 	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
490 		goto nla_put_failure;
491 	if (tcf_action_copy_stats(skb, a, 0))
492 		goto nla_put_failure;
493 	if (from_act && nla_put_u32(skb, TCA_ACT_INDEX, a->tcfa_index))
494 		goto nla_put_failure;
495 
496 	rcu_read_lock();
497 	cookie = rcu_dereference(a->act_cookie);
498 	if (cookie) {
499 		if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
500 			rcu_read_unlock();
501 			goto nla_put_failure;
502 		}
503 	}
504 	rcu_read_unlock();
505 
506 	return 0;
507 
508 nla_put_failure:
509 	nlmsg_trim(skb, b);
510 	return -1;
511 }
512 
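/* Dump actions from the idr for RTM_GETACTION, resuming from cb->args[0] and
 * honoring the dump flags and time delta stored in cb->args[2] and cb->args[3].
 */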
513 static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
514 			   struct netlink_callback *cb)
515 {
516 	int err = 0, index = -1, s_i = 0, n_i = 0;
517 	u32 act_flags = cb->args[2];
518 	unsigned long jiffy_since = cb->args[3];
519 	struct nlattr *nest;
520 	struct idr *idr = &idrinfo->action_idr;
521 	struct tc_action *p;
522 	unsigned long id = 1;
523 	unsigned long tmp;
524 
525 	mutex_lock(&idrinfo->lock);
526 
527 	s_i = cb->args[0];
528 
529 	idr_for_each_entry_ul(idr, p, tmp, id) {
530 		index++;
531 		if (index < s_i)
532 			continue;
533 		if (IS_ERR(p))
534 			continue;
535 
536 		if (jiffy_since &&
537 		    time_after(jiffy_since,
538 			       (unsigned long)p->tcfa_tm.lastuse))
539 			continue;
540 
541 		nest = nla_nest_start_noflag(skb, n_i);
542 		if (!nest) {
543 			index--;
544 			goto nla_put_failure;
545 		}
546 		err = (act_flags & TCA_ACT_FLAG_TERSE_DUMP) ?
547 			tcf_action_dump_terse(skb, p, true) :
548 			tcf_action_dump_1(skb, p, 0, 0);
549 		if (err < 0) {
550 			index--;
551 			nlmsg_trim(skb, nest);
552 			goto done;
553 		}
554 		nla_nest_end(skb, nest);
555 		n_i++;
556 		if (!(act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON) &&
557 		    n_i >= TCA_ACT_MAX_PRIO)
558 			goto done;
559 	}
560 done:
561 	if (index >= 0)
562 		cb->args[0] = index + 1;
563 
564 	mutex_unlock(&idrinfo->lock);
565 	if (n_i) {
566 		if (act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON)
567 			cb->args[1] = n_i;
568 	}
569 	return n_i;
570 
571 nla_put_failure:
572 	nla_nest_cancel(skb, nest);
573 	goto done;
574 }
575 
576 static int tcf_idr_release_unsafe(struct tc_action *p)
577 {
578 	if (atomic_read(&p->tcfa_bindcnt) > 0)
579 		return -EPERM;
580 
581 	if (refcount_dec_and_test(&p->tcfa_refcnt)) {
582 		idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
583 		tcf_action_cleanup(p);
584 		return ACT_P_DELETED;
585 	}
586 
587 	return 0;
588 }
589 
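/* Flush all actions of this kind: release each action in the idr (failing if
 * one is still bound) and report the flushed count in TCA_FCNT.
 */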
590 static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
591 			  const struct tc_action_ops *ops,
592 			  struct netlink_ext_ack *extack)
593 {
594 	struct nlattr *nest;
595 	int n_i = 0;
596 	int ret = -EINVAL;
597 	struct idr *idr = &idrinfo->action_idr;
598 	struct tc_action *p;
599 	unsigned long id = 1;
600 	unsigned long tmp;
601 
602 	nest = nla_nest_start_noflag(skb, 0);
603 	if (nest == NULL)
604 		goto nla_put_failure;
605 	if (nla_put_string(skb, TCA_KIND, ops->kind))
606 		goto nla_put_failure;
607 
608 	ret = 0;
609 	mutex_lock(&idrinfo->lock);
610 	idr_for_each_entry_ul(idr, p, tmp, id) {
611 		if (IS_ERR(p))
612 			continue;
613 		ret = tcf_idr_release_unsafe(p);
614 		if (ret == ACT_P_DELETED)
615 			module_put(ops->owner);
616 		else if (ret < 0)
617 			break;
618 		n_i++;
619 	}
620 	mutex_unlock(&idrinfo->lock);
621 	if (ret < 0) {
622 		if (n_i)
623 			NL_SET_ERR_MSG(extack, "Unable to flush all TC actions");
624 		else
625 			goto nla_put_failure;
626 	}
627 
628 	ret = nla_put_u32(skb, TCA_FCNT, n_i);
629 	if (ret)
630 		goto nla_put_failure;
631 	nla_nest_end(skb, nest);
632 
633 	return n_i;
634 nla_put_failure:
635 	nla_nest_cancel(skb, nest);
636 	return ret;
637 }
638 
639 int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
640 		       struct netlink_callback *cb, int type,
641 		       const struct tc_action_ops *ops,
642 		       struct netlink_ext_ack *extack)
643 {
644 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
645 
646 	if (type == RTM_DELACTION) {
647 		return tcf_del_walker(idrinfo, skb, ops, extack);
648 	} else if (type == RTM_GETACTION) {
649 		return tcf_dump_walker(idrinfo, skb, cb);
650 	} else {
651 		WARN(1, "tcf_generic_walker: unknown command %d\n", type);
652 		NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
653 		return -EINVAL;
654 	}
655 }
656 EXPORT_SYMBOL(tcf_generic_walker);
657 
658 int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
659 {
660 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
661 	struct tc_action *p;
662 
663 	mutex_lock(&idrinfo->lock);
664 	p = idr_find(&idrinfo->action_idr, index);
665 	if (IS_ERR(p))
666 		p = NULL;
667 	else if (p)
668 		refcount_inc(&p->tcfa_refcnt);
669 	mutex_unlock(&idrinfo->lock);
670 
671 	if (p) {
672 		*a = p;
673 		return true;
674 	}
675 	return false;
676 }
677 EXPORT_SYMBOL(tcf_idr_search);
678 
679 static int __tcf_generic_walker(struct net *net, struct sk_buff *skb,
680 				struct netlink_callback *cb, int type,
681 				const struct tc_action_ops *ops,
682 				struct netlink_ext_ack *extack)
683 {
684 	struct tc_action_net *tn = net_generic(net, ops->net_id);
685 
686 	if (unlikely(ops->walk))
687 		return ops->walk(net, skb, cb, type, ops, extack);
688 
689 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
690 }
691 
692 static int __tcf_idr_search(struct net *net,
693 			    const struct tc_action_ops *ops,
694 			    struct tc_action **a, u32 index)
695 {
696 	struct tc_action_net *tn = net_generic(net, ops->net_id);
697 
698 	if (unlikely(ops->lookup))
699 		return ops->lookup(net, a, index);
700 
701 	return tcf_idr_search(tn, a, index);
702 }
703 
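/* Delete the action with the given index, but only if it is not bound to any
 * classifier. Returns -ENOENT if no such action exists and -EPERM if it is
 * still bound.
 */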
704 static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
705 {
706 	struct tc_action *p;
707 	int ret = 0;
708 
709 	mutex_lock(&idrinfo->lock);
710 	p = idr_find(&idrinfo->action_idr, index);
711 	if (!p) {
712 		mutex_unlock(&idrinfo->lock);
713 		return -ENOENT;
714 	}
715 
716 	if (!atomic_read(&p->tcfa_bindcnt)) {
717 		if (refcount_dec_and_test(&p->tcfa_refcnt)) {
718 			struct module *owner = p->ops->owner;
719 
720 			WARN_ON(p != idr_remove(&idrinfo->action_idr,
721 						p->tcfa_index));
722 			mutex_unlock(&idrinfo->lock);
723 
724 			tcf_action_cleanup(p);
725 			module_put(owner);
726 			return 0;
727 		}
728 		ret = 0;
729 	} else {
730 		ret = -EPERM;
731 	}
732 
733 	mutex_unlock(&idrinfo->lock);
734 	return ret;
735 }
736 
737 int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
738 		   struct tc_action **a, const struct tc_action_ops *ops,
739 		   int bind, bool cpustats, u32 flags)
740 {
741 	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
742 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
743 	int err = -ENOMEM;
744 
745 	if (unlikely(!p))
746 		return -ENOMEM;
747 	refcount_set(&p->tcfa_refcnt, 1);
748 	if (bind)
749 		atomic_set(&p->tcfa_bindcnt, 1);
750 
751 	if (cpustats) {
752 		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
753 		if (!p->cpu_bstats)
754 			goto err1;
755 		p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
756 		if (!p->cpu_bstats_hw)
757 			goto err2;
758 		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
759 		if (!p->cpu_qstats)
760 			goto err3;
761 	}
762 	gnet_stats_basic_sync_init(&p->tcfa_bstats);
763 	gnet_stats_basic_sync_init(&p->tcfa_bstats_hw);
764 	spin_lock_init(&p->tcfa_lock);
765 	p->tcfa_index = index;
766 	p->tcfa_tm.install = jiffies;
767 	p->tcfa_tm.lastuse = jiffies;
768 	p->tcfa_tm.firstuse = 0;
769 	p->tcfa_flags = flags;
770 	if (est) {
771 		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
772 					&p->tcfa_rate_est,
773 					&p->tcfa_lock, false, est);
774 		if (err)
775 			goto err4;
776 	}
777 
778 	p->idrinfo = idrinfo;
779 	__module_get(ops->owner);
780 	p->ops = ops;
781 	*a = p;
782 	return 0;
783 err4:
784 	free_percpu(p->cpu_qstats);
785 err3:
786 	free_percpu(p->cpu_bstats_hw);
787 err2:
788 	free_percpu(p->cpu_bstats);
789 err1:
790 	kfree(p);
791 	return err;
792 }
793 EXPORT_SYMBOL(tcf_idr_create);
794 
795 int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
796 			      struct nlattr *est, struct tc_action **a,
797 			      const struct tc_action_ops *ops, int bind,
798 			      u32 flags)
799 {
800 	/* Set cpustats according to the action's flags. */
801 	return tcf_idr_create(tn, index, est, a, ops, bind,
802 			      !(flags & TCA_ACT_FLAGS_NO_PERCPU_STATS), flags);
803 }
804 EXPORT_SYMBOL(tcf_idr_create_from_flags);
805 
806 /* Cleanup idr index that was allocated but not initialized. */
807 
808 void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
809 {
810 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
811 
812 	mutex_lock(&idrinfo->lock);
813 	/* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
814 	WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
815 	mutex_unlock(&idrinfo->lock);
816 }
817 EXPORT_SYMBOL(tcf_idr_cleanup);
818 
819 /* Check if an action with the specified index exists. If the action is found,
820  * increment its reference and bind counters and return 1. Otherwise insert a
821  * temporary error pointer (to prevent concurrent users from inserting actions
822  * with the same index) and return 0.
823  */
824 
825 int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
826 			struct tc_action **a, int bind)
827 {
828 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
829 	struct tc_action *p;
830 	int ret;
831 
832 again:
833 	mutex_lock(&idrinfo->lock);
834 	if (*index) {
835 		p = idr_find(&idrinfo->action_idr, *index);
836 		if (IS_ERR(p)) {
837 			/* This means that another process allocated the
838 			 * index but did not assign the pointer yet.
839 			 */
840 			mutex_unlock(&idrinfo->lock);
841 			goto again;
842 		}
843 
844 		if (p) {
845 			refcount_inc(&p->tcfa_refcnt);
846 			if (bind)
847 				atomic_inc(&p->tcfa_bindcnt);
848 			*a = p;
849 			ret = 1;
850 		} else {
851 			*a = NULL;
852 			ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
853 					    *index, GFP_KERNEL);
854 			if (!ret)
855 				idr_replace(&idrinfo->action_idr,
856 					    ERR_PTR(-EBUSY), *index);
857 		}
858 	} else {
859 		*index = 1;
860 		*a = NULL;
861 		ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
862 				    UINT_MAX, GFP_KERNEL);
863 		if (!ret)
864 			idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
865 				    *index);
866 	}
867 	mutex_unlock(&idrinfo->lock);
868 	return ret;
869 }
870 EXPORT_SYMBOL(tcf_idr_check_alloc);
871 
872 void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
873 			 struct tcf_idrinfo *idrinfo)
874 {
875 	struct idr *idr = &idrinfo->action_idr;
876 	struct tc_action *p;
877 	int ret;
878 	unsigned long id = 1;
879 	unsigned long tmp;
880 
881 	idr_for_each_entry_ul(idr, p, tmp, id) {
882 		ret = __tcf_idr_release(p, false, true);
883 		if (ret == ACT_P_DELETED)
884 			module_put(ops->owner);
885 		else if (ret < 0)
886 			return;
887 	}
888 	idr_destroy(&idrinfo->action_idr);
889 }
890 EXPORT_SYMBOL(tcf_idrinfo_destroy);
891 
892 static LIST_HEAD(act_base);
893 static DEFINE_RWLOCK(act_mod_lock);
894 /* Since the act ops id is stored in the pernet subsystem list,
895  * there is no way to walk through only the action
896  * subsystems, so we keep the tc action pernet ops ids in a
897  * separate list for reoffload to walk through.
898  */
899 static LIST_HEAD(act_pernet_id_list);
900 static DEFINE_MUTEX(act_id_mutex);
901 struct tc_act_pernet_id {
902 	struct list_head list;
903 	unsigned int id;
904 };
905 
906 static int tcf_pernet_add_id_list(unsigned int id)
907 {
908 	struct tc_act_pernet_id *id_ptr;
909 	int ret = 0;
910 
911 	mutex_lock(&act_id_mutex);
912 	list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
913 		if (id_ptr->id == id) {
914 			ret = -EEXIST;
915 			goto err_out;
916 		}
917 	}
918 
919 	id_ptr = kzalloc(sizeof(*id_ptr), GFP_KERNEL);
920 	if (!id_ptr) {
921 		ret = -ENOMEM;
922 		goto err_out;
923 	}
924 	id_ptr->id = id;
925 
926 	list_add_tail(&id_ptr->list, &act_pernet_id_list);
927 
928 err_out:
929 	mutex_unlock(&act_id_mutex);
930 	return ret;
931 }
932 
933 static void tcf_pernet_del_id_list(unsigned int id)
934 {
935 	struct tc_act_pernet_id *id_ptr;
936 
937 	mutex_lock(&act_id_mutex);
938 	list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
939 		if (id_ptr->id == id) {
940 			list_del(&id_ptr->list);
941 			kfree(id_ptr);
942 			break;
943 		}
944 	}
945 	mutex_unlock(&act_id_mutex);
946 }
947 
948 int tcf_register_action(struct tc_action_ops *act,
949 			struct pernet_operations *ops)
950 {
951 	struct tc_action_ops *a;
952 	int ret;
953 
954 	if (!act->act || !act->dump || !act->init)
955 		return -EINVAL;
956 
957 	/* We have to register pernet ops before making the action ops visible,
958 	 * otherwise tcf_action_init_1() could get a partially initialized
959 	 * netns.
960 	 */
961 	ret = register_pernet_subsys(ops);
962 	if (ret)
963 		return ret;
964 
965 	if (ops->id) {
966 		ret = tcf_pernet_add_id_list(*ops->id);
967 		if (ret)
968 			goto err_id;
969 	}
970 
971 	write_lock(&act_mod_lock);
972 	list_for_each_entry(a, &act_base, head) {
973 		if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) {
974 			ret = -EEXIST;
975 			goto err_out;
976 		}
977 	}
978 	list_add_tail(&act->head, &act_base);
979 	write_unlock(&act_mod_lock);
980 
981 	return 0;
982 
983 err_out:
984 	write_unlock(&act_mod_lock);
985 	if (ops->id)
986 		tcf_pernet_del_id_list(*ops->id);
987 err_id:
988 	unregister_pernet_subsys(ops);
989 	return ret;
990 }
991 EXPORT_SYMBOL(tcf_register_action);
992 
993 int tcf_unregister_action(struct tc_action_ops *act,
994 			  struct pernet_operations *ops)
995 {
996 	struct tc_action_ops *a;
997 	int err = -ENOENT;
998 
999 	write_lock(&act_mod_lock);
1000 	list_for_each_entry(a, &act_base, head) {
1001 		if (a == act) {
1002 			list_del(&act->head);
1003 			err = 0;
1004 			break;
1005 		}
1006 	}
1007 	write_unlock(&act_mod_lock);
1008 	if (!err) {
1009 		unregister_pernet_subsys(ops);
1010 		if (ops->id)
1011 			tcf_pernet_del_id_list(*ops->id);
1012 	}
1013 	return err;
1014 }
1015 EXPORT_SYMBOL(tcf_unregister_action);
1016 
1017 /* lookup by name */
1018 static struct tc_action_ops *tc_lookup_action_n(char *kind)
1019 {
1020 	struct tc_action_ops *a, *res = NULL;
1021 
1022 	if (kind) {
1023 		read_lock(&act_mod_lock);
1024 		list_for_each_entry(a, &act_base, head) {
1025 			if (strcmp(kind, a->kind) == 0) {
1026 				if (try_module_get(a->owner))
1027 					res = a;
1028 				break;
1029 			}
1030 		}
1031 		read_unlock(&act_mod_lock);
1032 	}
1033 	return res;
1034 }
1035 
1036 /* lookup by nlattr */
1037 static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
1038 {
1039 	struct tc_action_ops *a, *res = NULL;
1040 
1041 	if (kind) {
1042 		read_lock(&act_mod_lock);
1043 		list_for_each_entry(a, &act_base, head) {
1044 			if (nla_strcmp(kind, a->kind) == 0) {
1045 				if (try_module_get(a->owner))
1046 					res = a;
1047 				break;
1048 			}
1049 		}
1050 		read_unlock(&act_mod_lock);
1051 	}
1052 	return res;
1053 }
1054 
1055 /* TCA_ACT_MAX_PRIO is 32, so jump counts there go up to 32 */
1056 #define TCA_ACT_MAX_PRIO_MASK 0x1FF
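/* Execute the array of actions on the skb. TC_ACT_PIPE continues with the next
 * action, TC_ACT_REPEAT re-runs the same action (bounded by repeat_ttl),
 * TC_ACT_JUMP skips actions, and TC_ACT_GOTO_CHAIN redirects classification to
 * another chain. Any other verdict terminates the walk and is returned.
 */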
1057 int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
1058 		    int nr_actions, struct tcf_result *res)
1059 {
1060 	u32 jmp_prgcnt = 0;
1061 	u32 jmp_ttl = TCA_ACT_MAX_PRIO; /* matches actions per filter */
1062 	int i;
1063 	int ret = TC_ACT_OK;
1064 
1065 	if (skb_skip_tc_classify(skb))
1066 		return TC_ACT_OK;
1067 
1068 restart_act_graph:
1069 	for (i = 0; i < nr_actions; i++) {
1070 		const struct tc_action *a = actions[i];
1071 		int repeat_ttl;
1072 
1073 		if (jmp_prgcnt > 0) {
1074 			jmp_prgcnt -= 1;
1075 			continue;
1076 		}
1077 
1078 		if (tc_act_skip_sw(a->tcfa_flags))
1079 			continue;
1080 
1081 		repeat_ttl = 32;
1082 repeat:
1083 		ret = a->ops->act(skb, a, res);
1084 		if (unlikely(ret == TC_ACT_REPEAT)) {
1085 			if (--repeat_ttl != 0)
1086 				goto repeat;
1087 			/* suspicious opcode, stop pipeline */
1088 			net_warn_ratelimited("TC_ACT_REPEAT abuse ?\n");
1089 			return TC_ACT_OK;
1090 		}
1091 		if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
1092 			jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
1093 			if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
1094 				/* faulty opcode, stop pipeline */
1095 				return TC_ACT_OK;
1096 			} else {
1097 				jmp_ttl -= 1;
1098 				if (jmp_ttl > 0)
1099 					goto restart_act_graph;
1100 				else /* faulty graph, stop pipeline */
1101 					return TC_ACT_OK;
1102 			}
1103 		} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
1104 			if (unlikely(!rcu_access_pointer(a->goto_chain))) {
1105 				net_warn_ratelimited("can't go to NULL chain!\n");
1106 				return TC_ACT_SHOT;
1107 			}
1108 			tcf_action_goto_chain_exec(a, res);
1109 		}
1110 
1111 		if (ret != TC_ACT_PIPE)
1112 			break;
1113 	}
1114 
1115 	return ret;
1116 }
1117 EXPORT_SYMBOL(tcf_action_exec);
1118 
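/* Release every action in the array, clearing each slot, and drop the module
 * reference for actions that were actually deleted. Stops and returns a
 * negative error if a release fails.
 */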
1119 int tcf_action_destroy(struct tc_action *actions[], int bind)
1120 {
1121 	const struct tc_action_ops *ops;
1122 	struct tc_action *a;
1123 	int ret = 0, i;
1124 
1125 	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
1126 		a = actions[i];
1127 		actions[i] = NULL;
1128 		ops = a->ops;
1129 		ret = __tcf_idr_release(a, bind, true);
1130 		if (ret == ACT_P_DELETED)
1131 			module_put(ops->owner);
1132 		else if (ret < 0)
1133 			return ret;
1134 	}
1135 	return ret;
1136 }
1137 
1138 static int tcf_action_put(struct tc_action *p)
1139 {
1140 	return __tcf_action_put(p, false);
1141 }
1142 
1143 /* Put (release) all actions in this array, skipping any NULL entries. */
1144 static void tcf_action_put_many(struct tc_action *actions[])
1145 {
1146 	int i;
1147 
1148 	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
1149 		struct tc_action *a = actions[i];
1150 		const struct tc_action_ops *ops;
1151 
1152 		if (!a)
1153 			continue;
1154 		ops = a->ops;
1155 		if (tcf_action_put(a))
1156 			module_put(ops->owner);
1157 	}
1158 }
1159 
1160 int
1161 tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
1162 {
1163 	return a->ops->dump(skb, a, bind, ref);
1164 }
1165 
1166 int
1167 tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
1168 {
1169 	int err = -EINVAL;
1170 	unsigned char *b = skb_tail_pointer(skb);
1171 	struct nlattr *nest;
1172 	u32 flags;
1173 
1174 	if (tcf_action_dump_terse(skb, a, false))
1175 		goto nla_put_failure;
1176 
1177 	if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
1178 	    nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
1179 			       a->hw_stats, TCA_ACT_HW_STATS_ANY))
1180 		goto nla_put_failure;
1181 
1182 	if (a->used_hw_stats_valid &&
1183 	    nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
1184 			       a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
1185 		goto nla_put_failure;
1186 
1187 	flags = a->tcfa_flags & TCA_ACT_FLAGS_USER_MASK;
1188 	if (flags &&
1189 	    nla_put_bitfield32(skb, TCA_ACT_FLAGS,
1190 			       flags, flags))
1191 		goto nla_put_failure;
1192 
1193 	if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count))
1194 		goto nla_put_failure;
1195 
1196 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
1197 	if (nest == NULL)
1198 		goto nla_put_failure;
1199 	err = tcf_action_dump_old(skb, a, bind, ref);
1200 	if (err > 0) {
1201 		nla_nest_end(skb, nest);
1202 		return err;
1203 	}
1204 
1205 nla_put_failure:
1206 	nlmsg_trim(skb, b);
1207 	return -1;
1208 }
1209 EXPORT_SYMBOL(tcf_action_dump_1);
1210 
1211 int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
1212 		    int bind, int ref, bool terse)
1213 {
1214 	struct tc_action *a;
1215 	int err = -EINVAL, i;
1216 	struct nlattr *nest;
1217 
1218 	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
1219 		a = actions[i];
1220 		nest = nla_nest_start_noflag(skb, i + 1);
1221 		if (nest == NULL)
1222 			goto nla_put_failure;
1223 		err = terse ? tcf_action_dump_terse(skb, a, false) :
1224 			tcf_action_dump_1(skb, a, bind, ref);
1225 		if (err < 0)
1226 			goto errout;
1227 		nla_nest_end(skb, nest);
1228 	}
1229 
1230 	return 0;
1231 
1232 nla_put_failure:
1233 	err = -EINVAL;
1234 errout:
1235 	nla_nest_cancel(skb, nest);
1236 	return err;
1237 }
1238 
1239 static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
1240 {
1241 	struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);
1242 	if (!c)
1243 		return NULL;
1244 
1245 	c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
1246 	if (!c->data) {
1247 		kfree(c);
1248 		return NULL;
1249 	}
1250 	c->len = nla_len(tb[TCA_ACT_COOKIE]);
1251 
1252 	return c;
1253 }
1254 
1255 static u8 tcf_action_hw_stats_get(struct nlattr *hw_stats_attr)
1256 {
1257 	struct nla_bitfield32 hw_stats_bf;
1258 
1259 	/* If the user did not pass the attr, that means they do
1260 	 * not care about the type. Return "any" in that case,
1261 	 * which has all supported types set.
1262 	 */
1263 	if (!hw_stats_attr)
1264 		return TCA_ACT_HW_STATS_ANY;
1265 	hw_stats_bf = nla_get_bitfield32(hw_stats_attr);
1266 	return hw_stats_bf.value;
1267 }
1268 
1269 static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
1270 	[TCA_ACT_KIND]		= { .type = NLA_STRING },
1271 	[TCA_ACT_INDEX]		= { .type = NLA_U32 },
1272 	[TCA_ACT_COOKIE]	= { .type = NLA_BINARY,
1273 				    .len = TC_COOKIE_MAX_SIZE },
1274 	[TCA_ACT_OPTIONS]	= { .type = NLA_NESTED },
1275 	[TCA_ACT_FLAGS]		= NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS |
1276 							TCA_ACT_FLAGS_SKIP_HW |
1277 							TCA_ACT_FLAGS_SKIP_SW),
1278 	[TCA_ACT_HW_STATS]	= NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
1279 };
1280 
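/* Commit newly created actions: make each one visible in its idr by replacing
 * the ERR_PTR(-EBUSY) placeholder inserted by tcf_idr_check_alloc().
 */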
1281 void tcf_idr_insert_many(struct tc_action *actions[])
1282 {
1283 	int i;
1284 
1285 	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
1286 		struct tc_action *a = actions[i];
1287 		struct tcf_idrinfo *idrinfo;
1288 
1289 		if (!a)
1290 			continue;
1291 		idrinfo = a->idrinfo;
1292 		mutex_lock(&idrinfo->lock);
1293 		/* Replace the ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc
1294 		 * if the action was just created, otherwise this is just a nop.
1295 		 */
1296 		idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
1297 		mutex_unlock(&idrinfo->lock);
1298 	}
1299 }
1300 
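/* Look up the action ops for the requested kind, loading the corresponding
 * act_* module if necessary. May temporarily drop the RTNL lock for the module
 * load, in which case -EAGAIN is returned so the caller replays the request.
 */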
1301 struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, bool police,
1302 					 bool rtnl_held,
1303 					 struct netlink_ext_ack *extack)
1304 {
1305 	struct nlattr *tb[TCA_ACT_MAX + 1];
1306 	struct tc_action_ops *a_o;
1307 	char act_name[IFNAMSIZ];
1308 	struct nlattr *kind;
1309 	int err;
1310 
1311 	if (!police) {
1312 		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1313 						  tcf_action_policy, extack);
1314 		if (err < 0)
1315 			return ERR_PTR(err);
1316 		err = -EINVAL;
1317 		kind = tb[TCA_ACT_KIND];
1318 		if (!kind) {
1319 			NL_SET_ERR_MSG(extack, "TC action kind must be specified");
1320 			return ERR_PTR(err);
1321 		}
1322 		if (nla_strscpy(act_name, kind, IFNAMSIZ) < 0) {
1323 			NL_SET_ERR_MSG(extack, "TC action name too long");
1324 			return ERR_PTR(err);
1325 		}
1326 	} else {
1327 		if (strlcpy(act_name, "police", IFNAMSIZ) >= IFNAMSIZ) {
1328 			NL_SET_ERR_MSG(extack, "TC action name too long");
1329 			return ERR_PTR(-EINVAL);
1330 		}
1331 	}
1332 
1333 	a_o = tc_lookup_action_n(act_name);
1334 	if (a_o == NULL) {
1335 #ifdef CONFIG_MODULES
1336 		if (rtnl_held)
1337 			rtnl_unlock();
1338 		request_module("act_%s", act_name);
1339 		if (rtnl_held)
1340 			rtnl_lock();
1341 
1342 		a_o = tc_lookup_action_n(act_name);
1343 
1344 		/* We dropped the RTNL semaphore in order to
1345 		 * perform the module load.  So, even if we
1346 		 * succeeded in loading the module we have to
1347 		 * tell the caller to replay the request.  We
1348 		 * indicate this using -EAGAIN.
1349 		 */
1350 		if (a_o != NULL) {
1351 			module_put(a_o->owner);
1352 			return ERR_PTR(-EAGAIN);
1353 		}
1354 #endif
1355 		NL_SET_ERR_MSG(extack, "Failed to load TC action module");
1356 		return ERR_PTR(-ENOENT);
1357 	}
1358 
1359 	return a_o;
1360 }
1361 
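/* Create or update a single action from its netlink attributes: parse the
 * attributes (except in the legacy policer case), validate the flags, duplicate
 * the user cookie, call the action's ->init() and attach the cookie and
 * hw_stats setting.
 */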
1362 struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
1363 				    struct nlattr *nla, struct nlattr *est,
1364 				    struct tc_action_ops *a_o, int *init_res,
1365 				    u32 flags, struct netlink_ext_ack *extack)
1366 {
1367 	bool police = flags & TCA_ACT_FLAGS_POLICE;
1368 	struct nla_bitfield32 userflags = { 0, 0 };
1369 	u8 hw_stats = TCA_ACT_HW_STATS_ANY;
1370 	struct nlattr *tb[TCA_ACT_MAX + 1];
1371 	struct tc_cookie *cookie = NULL;
1372 	struct tc_action *a;
1373 	int err;
1374 
1375 	/* backward compatibility for policer */
1376 	if (!police) {
1377 		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1378 						  tcf_action_policy, extack);
1379 		if (err < 0)
1380 			return ERR_PTR(err);
1381 		if (tb[TCA_ACT_COOKIE]) {
1382 			cookie = nla_memdup_cookie(tb);
1383 			if (!cookie) {
1384 				NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
1385 				err = -ENOMEM;
1386 				goto err_out;
1387 			}
1388 		}
1389 		hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
1390 		if (tb[TCA_ACT_FLAGS]) {
1391 			userflags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);
1392 			if (!tc_act_flags_valid(userflags.value)) {
1393 				err = -EINVAL;
1394 				goto err_out;
1395 			}
1396 		}
1397 
1398 		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, tp,
1399 				userflags.value | flags, extack);
1400 	} else {
1401 		err = a_o->init(net, nla, est, &a, tp, userflags.value | flags,
1402 				extack);
1403 	}
1404 	if (err < 0)
1405 		goto err_out;
1406 	*init_res = err;
1407 
1408 	if (!police && tb[TCA_ACT_COOKIE])
1409 		tcf_set_action_cookie(&a->act_cookie, cookie);
1410 
1411 	if (!police)
1412 		a->hw_stats = hw_stats;
1413 
1414 	return a;
1415 
1416 err_out:
1417 	if (cookie) {
1418 		kfree(cookie->data);
1419 		kfree(cookie);
1420 	}
1421 	return ERR_PTR(err);
1422 }
1423 
1424 static bool tc_act_bind(u32 flags)
1425 {
1426 	return !!(flags & TCA_ACT_FLAGS_BIND);
1427 }
1428 
1429 /* Returns the number of initialized actions or a negative error. */
1430 
1431 int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
1432 		    struct nlattr *est, struct tc_action *actions[],
1433 		    int init_res[], size_t *attr_size,
1434 		    u32 flags, u32 fl_flags,
1435 		    struct netlink_ext_ack *extack)
1436 {
1437 	struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {};
1438 	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
1439 	struct tc_action *act;
1440 	size_t sz = 0;
1441 	int err;
1442 	int i;
1443 
1444 	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
1445 					  extack);
1446 	if (err < 0)
1447 		return err;
1448 
1449 	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
1450 		struct tc_action_ops *a_o;
1451 
1452 		a_o = tc_action_load_ops(tb[i], flags & TCA_ACT_FLAGS_POLICE,
1453 					 !(flags & TCA_ACT_FLAGS_NO_RTNL),
1454 					 extack);
1455 		if (IS_ERR(a_o)) {
1456 			err = PTR_ERR(a_o);
1457 			goto err_mod;
1458 		}
1459 		ops[i - 1] = a_o;
1460 	}
1461 
1462 	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
1463 		act = tcf_action_init_1(net, tp, tb[i], est, ops[i - 1],
1464 					&init_res[i - 1], flags, extack);
1465 		if (IS_ERR(act)) {
1466 			err = PTR_ERR(act);
1467 			goto err;
1468 		}
1469 		sz += tcf_action_fill_size(act);
1470 		/* Start from index 0 */
1471 		actions[i - 1] = act;
1472 		if (tc_act_bind(flags)) {
1473 			bool skip_sw = tc_skip_sw(fl_flags);
1474 			bool skip_hw = tc_skip_hw(fl_flags);
1475 
1476 			if (tc_act_bind(act->tcfa_flags))
1477 				continue;
1478 			if (skip_sw != tc_act_skip_sw(act->tcfa_flags) ||
1479 			    skip_hw != tc_act_skip_hw(act->tcfa_flags)) {
1480 				NL_SET_ERR_MSG(extack,
1481 					       "Mismatch between action and filter offload flags");
1482 				err = -EINVAL;
1483 				goto err;
1484 			}
1485 		} else {
1486 			err = tcf_action_offload_add(act, extack);
1487 			if (tc_act_skip_sw(act->tcfa_flags) && err)
1488 				goto err;
1489 		}
1490 	}
1491 
1492 	/* We have to commit them all together, because if any error happened in
1493 	 * between, we could not handle the failure gracefully.
1494 	 */
1495 	tcf_idr_insert_many(actions);
1496 
1497 	*attr_size = tcf_action_full_attrs_size(sz);
1498 	err = i - 1;
1499 	goto err_mod;
1500 
1501 err:
1502 	tcf_action_destroy(actions, flags & TCA_ACT_FLAGS_BIND);
1503 err_mod:
1504 	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
1505 		if (ops[i])
1506 			module_put(ops[i]->owner);
1507 	}
1508 	return err;
1509 }
1510 
1511 void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
1512 			     u64 drops, bool hw)
1513 {
1514 	if (a->cpu_bstats) {
1515 		_bstats_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
1516 
1517 		this_cpu_ptr(a->cpu_qstats)->drops += drops;
1518 
1519 		if (hw)
1520 			_bstats_update(this_cpu_ptr(a->cpu_bstats_hw),
1521 				       bytes, packets);
1522 		return;
1523 	}
1524 
1525 	_bstats_update(&a->tcfa_bstats, bytes, packets);
1526 	a->tcfa_qstats.drops += drops;
1527 	if (hw)
1528 		_bstats_update(&a->tcfa_bstats_hw, bytes, packets);
1529 }
1530 EXPORT_SYMBOL(tcf_action_update_stats);
1531 
1532 int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
1533 			  int compat_mode)
1534 {
1535 	int err = 0;
1536 	struct gnet_dump d;
1537 
1538 	if (p == NULL)
1539 		goto errout;
1540 
1541 	/* update hw stats for this action */
1542 	tcf_action_update_hw_stats(p);
1543 
1544 	/* compat_mode being true specifies a call that is supposed
1545 	 * to add additional backward compatibility statistic TLVs.
1546 	 */
1547 	if (compat_mode) {
1548 		if (p->type == TCA_OLD_COMPAT)
1549 			err = gnet_stats_start_copy_compat(skb, 0,
1550 							   TCA_STATS,
1551 							   TCA_XSTATS,
1552 							   &p->tcfa_lock, &d,
1553 							   TCA_PAD);
1554 		else
1555 			return 0;
1556 	} else
1557 		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
1558 					    &p->tcfa_lock, &d, TCA_ACT_PAD);
1559 
1560 	if (err < 0)
1561 		goto errout;
1562 
1563 	if (gnet_stats_copy_basic(&d, p->cpu_bstats,
1564 				  &p->tcfa_bstats, false) < 0 ||
1565 	    gnet_stats_copy_basic_hw(&d, p->cpu_bstats_hw,
1566 				     &p->tcfa_bstats_hw, false) < 0 ||
1567 	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
1568 	    gnet_stats_copy_queue(&d, p->cpu_qstats,
1569 				  &p->tcfa_qstats,
1570 				  p->tcfa_qstats.qlen) < 0)
1571 		goto errout;
1572 
1573 	if (gnet_stats_finish_copy(&d) < 0)
1574 		goto errout;
1575 
1576 	return 0;
1577 
1578 errout:
1579 	return -1;
1580 }
1581 
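/* Fill @skb with a tcamsg header and a TCA_ACT_TAB nest containing a dump of
 * @actions. Returns skb->len on success or -1 on failure.
 */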
1582 static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
1583 			u32 portid, u32 seq, u16 flags, int event, int bind,
1584 			int ref)
1585 {
1586 	struct tcamsg *t;
1587 	struct nlmsghdr *nlh;
1588 	unsigned char *b = skb_tail_pointer(skb);
1589 	struct nlattr *nest;
1590 
1591 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
1592 	if (!nlh)
1593 		goto out_nlmsg_trim;
1594 	t = nlmsg_data(nlh);
1595 	t->tca_family = AF_UNSPEC;
1596 	t->tca__pad1 = 0;
1597 	t->tca__pad2 = 0;
1598 
1599 	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
1600 	if (!nest)
1601 		goto out_nlmsg_trim;
1602 
1603 	if (tcf_action_dump(skb, actions, bind, ref, false) < 0)
1604 		goto out_nlmsg_trim;
1605 
1606 	nla_nest_end(skb, nest);
1607 
1608 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1609 	return skb->len;
1610 
1611 out_nlmsg_trim:
1612 	nlmsg_trim(skb, b);
1613 	return -1;
1614 }
1615 
1616 static int
1617 tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
1618 	       struct tc_action *actions[], int event,
1619 	       struct netlink_ext_ack *extack)
1620 {
1621 	struct sk_buff *skb;
1622 
1623 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1624 	if (!skb)
1625 		return -ENOBUFS;
1626 	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
1627 			 0, 1) <= 0) {
1628 		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
1629 		kfree_skb(skb);
1630 		return -EINVAL;
1631 	}
1632 
1633 	return rtnl_unicast(skb, net, portid);
1634 }
1635 
1636 static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
1637 					  struct nlmsghdr *n, u32 portid,
1638 					  struct netlink_ext_ack *extack)
1639 {
1640 	struct nlattr *tb[TCA_ACT_MAX + 1];
1641 	const struct tc_action_ops *ops;
1642 	struct tc_action *a;
1643 	int index;
1644 	int err;
1645 
1646 	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1647 					  tcf_action_policy, extack);
1648 	if (err < 0)
1649 		goto err_out;
1650 
1651 	err = -EINVAL;
1652 	if (tb[TCA_ACT_INDEX] == NULL ||
1653 	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) {
1654 		NL_SET_ERR_MSG(extack, "Invalid TC action index value");
1655 		goto err_out;
1656 	}
1657 	index = nla_get_u32(tb[TCA_ACT_INDEX]);
1658 
1659 	err = -EINVAL;
1660 	ops = tc_lookup_action(tb[TCA_ACT_KIND]);
1661 	if (!ops) { /* could happen in a batch of actions */
1662 		NL_SET_ERR_MSG(extack, "Specified TC action kind not found");
1663 		goto err_out;
1664 	}
1665 	err = -ENOENT;
1666 	if (__tcf_idr_search(net, ops, &a, index) == 0) {
1667 		NL_SET_ERR_MSG(extack, "TC action with specified index not found");
1668 		goto err_mod;
1669 	}
1670 
1671 	module_put(ops->owner);
1672 	return a;
1673 
1674 err_mod:
1675 	module_put(ops->owner);
1676 err_out:
1677 	return ERR_PTR(err);
1678 }
1679 
1680 static int tca_action_flush(struct net *net, struct nlattr *nla,
1681 			    struct nlmsghdr *n, u32 portid,
1682 			    struct netlink_ext_ack *extack)
1683 {
1684 	struct sk_buff *skb;
1685 	unsigned char *b;
1686 	struct nlmsghdr *nlh;
1687 	struct tcamsg *t;
1688 	struct netlink_callback dcb;
1689 	struct nlattr *nest;
1690 	struct nlattr *tb[TCA_ACT_MAX + 1];
1691 	const struct tc_action_ops *ops;
1692 	struct nlattr *kind;
1693 	int err = -ENOMEM;
1694 
1695 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1696 	if (!skb)
1697 		return err;
1698 
1699 	b = skb_tail_pointer(skb);
1700 
1701 	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1702 					  tcf_action_policy, extack);
1703 	if (err < 0)
1704 		goto err_out;
1705 
1706 	err = -EINVAL;
1707 	kind = tb[TCA_ACT_KIND];
1708 	ops = tc_lookup_action(kind);
1709 	if (!ops) { /* someone is trying to flush an unknown action */
1710 		NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action");
1711 		goto err_out;
1712 	}
1713 
1714 	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
1715 			sizeof(*t), 0);
1716 	if (!nlh) {
1717 		NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification");
1718 		goto out_module_put;
1719 	}
1720 	t = nlmsg_data(nlh);
1721 	t->tca_family = AF_UNSPEC;
1722 	t->tca__pad1 = 0;
1723 	t->tca__pad2 = 0;
1724 
1725 	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
1726 	if (!nest) {
1727 		NL_SET_ERR_MSG(extack, "Failed to add new netlink message");
1728 		goto out_module_put;
1729 	}
1730 
1731 	err = __tcf_generic_walker(net, skb, &dcb, RTM_DELACTION, ops, extack);
1732 	if (err <= 0) {
1733 		nla_nest_cancel(skb, nest);
1734 		goto out_module_put;
1735 	}
1736 
1737 	nla_nest_end(skb, nest);
1738 
1739 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1740 	nlh->nlmsg_flags |= NLM_F_ROOT;
1741 	module_put(ops->owner);
1742 	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1743 			     n->nlmsg_flags & NLM_F_ECHO);
1744 	if (err < 0)
1745 		NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");
1746 
1747 	return err;
1748 
1749 out_module_put:
1750 	module_put(ops->owner);
1751 err_out:
1752 	kfree_skb(skb);
1753 	return err;
1754 }
1755 
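/* Drop our reference on each action; if it was not the last one, delete the
 * action from its idr by index. The idrinfo and index are saved first because
 * the action may be freed concurrently.
 */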
1756 static int tcf_action_delete(struct net *net, struct tc_action *actions[])
1757 {
1758 	int i;
1759 
1760 	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
1761 		struct tc_action *a = actions[i];
1762 		const struct tc_action_ops *ops = a->ops;
1763 		/* Actions can be deleted concurrently so we must save their
1764 		 * idrinfo and index to search again after the reference is released.
1765 		 */
1766 		struct tcf_idrinfo *idrinfo = a->idrinfo;
1767 		u32 act_index = a->tcfa_index;
1768 
1769 		actions[i] = NULL;
1770 		if (tcf_action_put(a)) {
1771 			/* last reference, action was deleted concurrently */
1772 			module_put(ops->owner);
1773 		} else  {
1774 			int ret;
1775 
1776 			/* now do the delete */
1777 			ret = tcf_idr_delete_index(idrinfo, act_index);
1778 			if (ret < 0)
1779 				return ret;
1780 		}
1781 	}
1782 	return 0;
1783 }
1784 
1785 static int
1786 tcf_reoffload_del_notify(struct net *net, struct tc_action *action)
1787 {
1788 	size_t attr_size = tcf_action_fill_size(action);
1789 	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
1790 		[0] = action,
1791 	};
1792 	const struct tc_action_ops *ops = action->ops;
1793 	struct sk_buff *skb;
1794 	int ret;
1795 
1796 	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
1797 			GFP_KERNEL);
1798 	if (!skb)
1799 		return -ENOBUFS;
1800 
1801 	if (tca_get_fill(skb, actions, 0, 0, 0, RTM_DELACTION, 0, 1) <= 0) {
1802 		kfree_skb(skb);
1803 		return -EINVAL;
1804 	}
1805 
1806 	ret = tcf_idr_release_unsafe(action);
1807 	if (ret == ACT_P_DELETED) {
1808 		module_put(ops->owner);
1809 		ret = rtnetlink_send(skb, net, 0, RTNLGRP_TC, 0);
1810 	} else {
1811 		kfree_skb(skb);
1812 	}
1813 
1814 	return ret;
1815 }
1816 
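/* Walk all actions in every network namespace and, depending on @add, either
 * offload them through the newly registered indirect block callback or remove
 * the offload state associated with the callback being unregistered.
 */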
1817 int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb,
1818 			    void *cb_priv, bool add)
1819 {
1820 	struct tc_act_pernet_id *id_ptr;
1821 	struct tcf_idrinfo *idrinfo;
1822 	struct tc_action_net *tn;
1823 	struct tc_action *p;
1824 	unsigned int act_id;
1825 	unsigned long tmp;
1826 	unsigned long id;
1827 	struct idr *idr;
1828 	struct net *net;
1829 	int ret;
1830 
1831 	if (!cb)
1832 		return -EINVAL;
1833 
1834 	down_read(&net_rwsem);
1835 	mutex_lock(&act_id_mutex);
1836 
1837 	for_each_net(net) {
1838 		list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
1839 			act_id = id_ptr->id;
1840 			tn = net_generic(net, act_id);
1841 			if (!tn)
1842 				continue;
1843 			idrinfo = tn->idrinfo;
1844 			if (!idrinfo)
1845 				continue;
1846 
1847 			mutex_lock(&idrinfo->lock);
1848 			idr = &idrinfo->action_idr;
1849 			idr_for_each_entry_ul(idr, p, tmp, id) {
1850 				if (IS_ERR(p) || tc_act_bind(p->tcfa_flags))
1851 					continue;
1852 				if (add) {
1853 					tcf_action_offload_add_ex(p, NULL, cb,
1854 								  cb_priv);
1855 					continue;
1856 				}
1857 
1858 				/* cb is being unregistered; update the hw count */
1859 				ret = tcf_action_offload_del_ex(p, cb, cb_priv);
1860 				if (ret < 0)
1861 					continue;
1862 				if (tc_act_skip_sw(p->tcfa_flags) &&
1863 				    !tc_act_in_hw(p))
1864 					tcf_reoffload_del_notify(net, p);
1865 			}
1866 			mutex_unlock(&idrinfo->lock);
1867 		}
1868 	}
1869 	mutex_unlock(&act_id_mutex);
1870 	up_read(&net_rwsem);
1871 
1872 	return 0;
1873 }
1874 
1875 static int
1876 tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
1877 	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
1878 {
1879 	int ret;
1880 	struct sk_buff *skb;
1881 
1882 	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
1883 			GFP_KERNEL);
1884 	if (!skb)
1885 		return -ENOBUFS;
1886 
1887 	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
1888 			 0, 2) <= 0) {
1889 		NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
1890 		kfree_skb(skb);
1891 		return -EINVAL;
1892 	}
1893 
1894 	/* now do the delete */
1895 	ret = tcf_action_delete(net, actions);
1896 	if (ret < 0) {
1897 		NL_SET_ERR_MSG(extack, "Failed to delete TC action");
1898 		kfree_skb(skb);
1899 		return ret;
1900 	}
1901 
1902 	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1903 			     n->nlmsg_flags & NLM_F_ECHO);
1904 	return ret;
1905 }
1906 
1907 static int
1908 tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
1909 	      u32 portid, int event, struct netlink_ext_ack *extack)
1910 {
1911 	int i, ret;
1912 	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
1913 	struct tc_action *act;
1914 	size_t attr_size = 0;
1915 	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
1916 
1917 	ret = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
1918 					  extack);
1919 	if (ret < 0)
1920 		return ret;
1921 
1922 	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
1923 		if (tb[1])
1924 			return tca_action_flush(net, tb[1], n, portid, extack);
1925 
1926 		NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action");
1927 		return -EINVAL;
1928 	}
1929 
1930 	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
1931 		act = tcf_action_get_1(net, tb[i], n, portid, extack);
1932 		if (IS_ERR(act)) {
1933 			ret = PTR_ERR(act);
1934 			goto err;
1935 		}
1936 		attr_size += tcf_action_fill_size(act);
1937 		actions[i - 1] = act;
1938 	}
1939 
1940 	attr_size = tcf_action_full_attrs_size(attr_size);
1941 
1942 	if (event == RTM_GETACTION)
1943 		ret = tcf_get_notify(net, portid, n, actions, event, extack);
1944 	else { /* delete */
1945 		ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
1946 		if (ret)
1947 			goto err;
1948 		return 0;
1949 	}
1950 err:
1951 	tcf_action_put_many(actions);
1952 	return ret;
1953 }
1954 
1955 static int
1956 tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
1957 	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
1958 {
1959 	struct sk_buff *skb;
1960 
1961 	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
1962 			GFP_KERNEL);
1963 	if (!skb)
1964 		return -ENOBUFS;
1965 
1966 	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
1967 			 RTM_NEWACTION, 0, 0) <= 0) {
1968 		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
1969 		kfree_skb(skb);
1970 		return -EINVAL;
1971 	}
1972 
1973 	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1974 			      n->nlmsg_flags & NLM_F_ECHO);
1975 }
1976 
1977 static int tcf_action_add(struct net *net, struct nlattr *nla,
1978 			  struct nlmsghdr *n, u32 portid, u32 flags,
1979 			  struct netlink_ext_ack *extack)
1980 {
1981 	size_t attr_size = 0;
1982 	int loop, ret, i;
1983 	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
1984 	int init_res[TCA_ACT_MAX_PRIO] = {};
1985 
1986 	for (loop = 0; loop < 10; loop++) {
1987 		ret = tcf_action_init(net, NULL, nla, NULL, actions, init_res,
1988 				      &attr_size, flags, 0, extack);
1989 		if (ret != -EAGAIN)
1990 			break;
1991 	}
1992 
1993 	if (ret < 0)
1994 		return ret;
1995 	ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
1996 
1997 	/* only put existing actions */
1998 	for (i = 0; i < TCA_ACT_MAX_PRIO; i++)
1999 		if (init_res[i] == ACT_P_CREATED)
2000 			actions[i] = NULL;
2001 	tcf_action_put_many(actions);
2002 
2003 	return ret;
2004 }
2005 
2006 static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
2007 	[TCA_ROOT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAG_LARGE_DUMP_ON |
2008 						 TCA_ACT_FLAG_TERSE_DUMP),
2009 	[TCA_ROOT_TIME_DELTA]      = { .type = NLA_U32 },
2010 };
2011 
2012 static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
2013 			 struct netlink_ext_ack *extack)
2014 {
2015 	struct net *net = sock_net(skb->sk);
2016 	struct nlattr *tca[TCA_ROOT_MAX + 1];
2017 	u32 portid = NETLINK_CB(skb).portid;
2018 	u32 flags = 0;
2019 	int ret = 0;
2020 
2021 	if ((n->nlmsg_type != RTM_GETACTION) &&
2022 	    !netlink_capable(skb, CAP_NET_ADMIN))
2023 		return -EPERM;
2024 
2025 	ret = nlmsg_parse_deprecated(n, sizeof(struct tcamsg), tca,
2026 				     TCA_ROOT_MAX, NULL, extack);
2027 	if (ret < 0)
2028 		return ret;
2029 
2030 	if (tca[TCA_ACT_TAB] == NULL) {
2031 		NL_SET_ERR_MSG(extack, "Netlink action attributes missing");
2032 		return -EINVAL;
2033 	}
2034 
2035 	/* n->nlmsg_flags & NLM_F_CREATE */
2036 	switch (n->nlmsg_type) {
2037 	case RTM_NEWACTION:
2038 		/* We are going to assume all other flags
2039 		 * imply "create only if it doesn't exist".
2040 		 * Note that CREATE | EXCL implies that,
2041 		 * but since we want to avoid ambiguity (e.g. when flags
2042 		 * is zero) we just set this explicitly.
2043 		 */
2044 		if (n->nlmsg_flags & NLM_F_REPLACE)
2045 			flags = TCA_ACT_FLAGS_REPLACE;
2046 		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, flags,
2047 				     extack);
2048 		break;
2049 	case RTM_DELACTION:
2050 		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
2051 				    portid, RTM_DELACTION, extack);
2052 		break;
2053 	case RTM_GETACTION:
2054 		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
2055 				    portid, RTM_GETACTION, extack);
2056 		break;
2057 	default:
2058 		BUG();
2059 	}
2060 
2061 	return ret;
2062 }
2063 
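/* Extract the TCA_ACT_KIND attribute of the first action in a dump request. */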
2064 static struct nlattr *find_dump_kind(struct nlattr **nla)
2065 {
2066 	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
2067 	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
2068 	struct nlattr *kind;
2069 
2070 	tb1 = nla[TCA_ACT_TAB];
2071 	if (tb1 == NULL)
2072 		return NULL;
2073 
2074 	if (nla_parse_deprecated(tb, TCA_ACT_MAX_PRIO, nla_data(tb1), NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
2075 		return NULL;
2076 
2077 	if (tb[1] == NULL)
2078 		return NULL;
2079 	if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1], tcf_action_policy, NULL) < 0)
2080 		return NULL;
2081 	kind = tb2[TCA_ACT_KIND];
2082 
2083 	return kind;
2084 }
2085 
2086 static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
2087 {
2088 	struct net *net = sock_net(skb->sk);
2089 	struct nlmsghdr *nlh;
2090 	unsigned char *b = skb_tail_pointer(skb);
2091 	struct nlattr *nest;
2092 	struct tc_action_ops *a_o;
2093 	int ret = 0;
2094 	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
2095 	struct nlattr *tb[TCA_ROOT_MAX + 1];
2096 	struct nlattr *count_attr = NULL;
2097 	unsigned long jiffy_since = 0;
2098 	struct nlattr *kind = NULL;
2099 	struct nla_bitfield32 bf;
2100 	u32 msecs_since = 0;
2101 	u32 act_count = 0;
2102 
2103 	ret = nlmsg_parse_deprecated(cb->nlh, sizeof(struct tcamsg), tb,
2104 				     TCA_ROOT_MAX, tcaa_policy, cb->extack);
2105 	if (ret < 0)
2106 		return ret;
2107 
2108 	kind = find_dump_kind(tb);
2109 	if (kind == NULL) {
2110 		pr_info("tc_dump_action: action bad kind\n");
2111 		return 0;
2112 	}
2113 
2114 	a_o = tc_lookup_action(kind);
2115 	if (a_o == NULL)
2116 		return 0;
2117 
2118 	cb->args[2] = 0;
2119 	if (tb[TCA_ROOT_FLAGS]) {
2120 		bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
2121 		cb->args[2] = bf.value;
2122 	}
2123 
2124 	if (tb[TCA_ROOT_TIME_DELTA]) {
2125 		msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);
2126 	}
2127 
2128 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
2129 			cb->nlh->nlmsg_type, sizeof(*t), 0);
2130 	if (!nlh)
2131 		goto out_module_put;
2132 
2133 	if (msecs_since)
2134 		jiffy_since = jiffies - msecs_to_jiffies(msecs_since);
2135 
2136 	t = nlmsg_data(nlh);
2137 	t->tca_family = AF_UNSPEC;
2138 	t->tca__pad1 = 0;
2139 	t->tca__pad2 = 0;
2140 	cb->args[3] = jiffy_since;
2141 	count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
2142 	if (!count_attr)
2143 		goto out_module_put;
2144 
2145 	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
2146 	if (nest == NULL)
2147 		goto out_module_put;
2148 
2149 	ret = __tcf_generic_walker(net, skb, cb, RTM_GETACTION, a_o, NULL);
2150 	if (ret < 0)
2151 		goto out_module_put;
2152 
2153 	if (ret > 0) {
2154 		nla_nest_end(skb, nest);
2155 		ret = skb->len;
2156 		act_count = cb->args[1];
2157 		memcpy(nla_data(count_attr), &act_count, sizeof(u32));
2158 		cb->args[1] = 0;
2159 	} else
2160 		nlmsg_trim(skb, b);
2161 
2162 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2163 	if (NETLINK_CB(cb->skb).portid && ret)
2164 		nlh->nlmsg_flags |= NLM_F_MULTI;
2165 	module_put(a_o->owner);
2166 	return skb->len;
2167 
2168 out_module_put:
2169 	module_put(a_o->owner);
2170 	nlmsg_trim(skb, b);
2171 	return skb->len;
2172 }
2173 
2174 static int __init tc_action_init(void)
2175 {
2176 	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
2177 	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
2178 	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
2179 		      0);
2180 
2181 	return 0;
2182 }
2183 
2184 subsys_initcall(tc_action_init);
2185