// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/dsfield.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_skbedit.h>

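/*
 * Example usage, a sketch based on the tc-skbedit(8) syntax; the device,
 * classifier match and queue number below are illustrative only:
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
 *		match ip dst 192.168.0.3 flowid 1:2 \
 *		action skbedit queue_mapping 3
 */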
static struct tc_action_ops act_skbedit_ops;

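/*
 * Select the TX queue for SKBEDIT_F_QUEUE_MAPPING. With
 * SKBEDIT_F_TXQ_SKBHASH also set, packets are spread over the configured
 * range using the skb hash: mapping_mod is the range size, so e.g.
 * queue_mapping 2 and queue_mapping_max 5 give mapping_mod 4 and a final
 * queue of 2 + (hash % 4). The result is capped to the number of TX
 * queues actually present on the device.
 */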
static u16 tcf_skbedit_hash(struct tcf_skbedit_params *params,
			    struct sk_buff *skb)
{
	u16 queue_mapping = params->queue_mapping;

	if (params->flags & SKBEDIT_F_TXQ_SKBHASH) {
		u32 hash = skb_get_hash(skb);

		queue_mapping += hash % params->mapping_mod;
	}

	return netdev_cap_txqueue(skb->dev, queue_mapping);
}

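/*
 * Datapath handler: called per packet under RCU (bh), applies whichever
 * edits were configured (priority, DS-field inheritance, queue mapping,
 * mark, packet type) and returns the configured control action.
 */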
static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a,
			   struct tcf_result *res)
{
	struct tcf_skbedit *d = to_skbedit(a);
	struct tcf_skbedit_params *params;
	int action;

	tcf_lastuse_update(&d->tcf_tm);
	bstats_update(this_cpu_ptr(d->common.cpu_bstats), skb);

	params = rcu_dereference_bh(d->params);
	action = READ_ONCE(d->tcf_action);

	if (params->flags & SKBEDIT_F_PRIORITY)
		skb->priority = params->priority;
	if (params->flags & SKBEDIT_F_INHERITDSFIELD) {
		int wlen = skb_network_offset(skb);

		switch (skb_protocol(skb, true)) {
		case htons(ETH_P_IP):
			wlen += sizeof(struct iphdr);
			if (!pskb_may_pull(skb, wlen))
				goto err;
			skb->priority = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
			break;

		case htons(ETH_P_IPV6):
			wlen += sizeof(struct ipv6hdr);
			if (!pskb_may_pull(skb, wlen))
				goto err;
			skb->priority = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
			break;
		}
	}
	if (params->flags & SKBEDIT_F_QUEUE_MAPPING &&
	    skb->dev->real_num_tx_queues > params->queue_mapping) {
#ifdef CONFIG_NET_EGRESS
		netdev_xmit_skip_txqueue(true);
#endif
		skb_set_queue_mapping(skb, tcf_skbedit_hash(params, skb));
	}
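	/*
	 * Rewrite only the mark bits selected by the mask; e.g. mark 0x11
	 * with mask 0xf0 updates just the top nibble:
	 * (old & ~0xf0) | (0x11 & 0xf0) == (old & ~0xf0) | 0x10.
	 */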
	if (params->flags & SKBEDIT_F_MARK) {
		skb->mark &= ~params->mask;
		skb->mark |= params->mark & params->mask;
	}
	if (params->flags & SKBEDIT_F_PTYPE)
		skb->pkt_type = params->ptype;
	return action;

err:
	qstats_drop_inc(this_cpu_ptr(d->common.cpu_qstats));
	return TC_ACT_SHOT;
}

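/*
 * Offload stats callback: fold counters reported by drivers into the
 * software stats and advance the last-use timestamp.
 */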
static void tcf_skbedit_stats_update(struct tc_action *a, u64 bytes,
				     u64 packets, u64 drops,
				     u64 lastuse, bool hw)
{
	struct tcf_skbedit *d = to_skbedit(a);
	struct tcf_t *tm = &d->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

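/* Netlink policy: every skbedit attribute is a fixed-size binary blob. */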
static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
	[TCA_SKBEDIT_PARMS]		= { .len = sizeof(struct tc_skbedit) },
	[TCA_SKBEDIT_PRIORITY]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_QUEUE_MAPPING]	= { .len = sizeof(u16) },
	[TCA_SKBEDIT_MARK]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_PTYPE]		= { .len = sizeof(u16) },
	[TCA_SKBEDIT_MASK]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_FLAGS]		= { .len = sizeof(u64) },
	[TCA_SKBEDIT_QUEUE_MAPPING_MAX]	= { .len = sizeof(u16) },
};

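/*
 * Control path: parse the netlink attributes, create or replace the
 * action instance, then publish the new parameter block with an RCU
 * pointer swap under tcf_lock so the datapath never sees a partial
 * configuration.
 */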
static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
			    struct nlattr *est, struct tc_action **a,
			    struct tcf_proto *tp, u32 act_flags,
			    struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_skbedit_ops.net_id);
	bool bind = act_flags & TCA_ACT_FLAGS_BIND;
	struct tcf_skbedit_params *params_new;
	struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_skbedit *parm;
	struct tcf_skbedit *d;
	u32 flags = 0, *priority = NULL, *mark = NULL, *mask = NULL;
	u16 *queue_mapping = NULL, *ptype = NULL;
	u16 mapping_mod = 1;
	bool exists = false;
	int ret = 0, err;
	u32 index;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_SKBEDIT_MAX, nla,
					  skbedit_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_SKBEDIT_PARMS] == NULL)
		return -EINVAL;

	if (tb[TCA_SKBEDIT_PRIORITY] != NULL) {
		flags |= SKBEDIT_F_PRIORITY;
		priority = nla_data(tb[TCA_SKBEDIT_PRIORITY]);
	}

	if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) {
		flags |= SKBEDIT_F_QUEUE_MAPPING;
		queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]);
	}

	if (tb[TCA_SKBEDIT_PTYPE] != NULL) {
		ptype = nla_data(tb[TCA_SKBEDIT_PTYPE]);
		if (!skb_pkt_type_ok(*ptype))
			return -EINVAL;
		flags |= SKBEDIT_F_PTYPE;
	}

	if (tb[TCA_SKBEDIT_MARK] != NULL) {
		flags |= SKBEDIT_F_MARK;
		mark = nla_data(tb[TCA_SKBEDIT_MARK]);
	}

	if (tb[TCA_SKBEDIT_MASK] != NULL) {
		flags |= SKBEDIT_F_MASK;
		mask = nla_data(tb[TCA_SKBEDIT_MASK]);
	}

	if (tb[TCA_SKBEDIT_FLAGS] != NULL) {
		u64 *pure_flags = nla_data(tb[TCA_SKBEDIT_FLAGS]);

		if (*pure_flags & SKBEDIT_F_TXQ_SKBHASH) {
			u16 *queue_mapping_max;

			if (!tb[TCA_SKBEDIT_QUEUE_MAPPING] ||
			    !tb[TCA_SKBEDIT_QUEUE_MAPPING_MAX]) {
				NL_SET_ERR_MSG_MOD(extack, "Missing required range of queue_mapping.");
				return -EINVAL;
			}

			queue_mapping_max =
				nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING_MAX]);
			if (*queue_mapping_max < *queue_mapping) {
				NL_SET_ERR_MSG_MOD(extack, "The range of queue_mapping is invalid, max < min.");
				return -EINVAL;
			}

			mapping_mod = *queue_mapping_max - *queue_mapping + 1;
			flags |= SKBEDIT_F_TXQ_SKBHASH;
		}
		if (*pure_flags & SKBEDIT_F_INHERITDSFIELD)
			flags |= SKBEDIT_F_INHERITDSFIELD;
	}

	parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	if (!flags) {
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		return -EINVAL;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, index, est, a,
				     &act_skbedit_ops, bind, true, act_flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		d = to_skbedit(*a);
		ret = ACT_P_CREATED;
	} else {
		d = to_skbedit(*a);
		if (!(act_flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		err = -ENOMEM;
		goto put_chain;
	}

	params_new->flags = flags;
	if (flags & SKBEDIT_F_PRIORITY)
		params_new->priority = *priority;
	if (flags & SKBEDIT_F_QUEUE_MAPPING) {
		params_new->queue_mapping = *queue_mapping;
		params_new->mapping_mod = mapping_mod;
	}
	if (flags & SKBEDIT_F_MARK)
		params_new->mark = *mark;
	if (flags & SKBEDIT_F_PTYPE)
		params_new->ptype = *ptype;
	/* default behaviour is to use all the bits */
	params_new->mask = 0xffffffff;
	if (flags & SKBEDIT_F_MASK)
		params_new->mask = *mask;

	spin_lock_bh(&d->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params_new = rcu_replace_pointer(d->params, params_new,
					 lockdep_is_held(&d->tcf_lock));
	spin_unlock_bh(&d->tcf_lock);
	if (params_new)
		kfree_rcu(params_new, rcu);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

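/*
 * Dump the configuration back to user space; only the options whose
 * flag bits are set in params->flags are emitted.
 */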
static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
			    int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_skbedit *d = to_skbedit(a);
	struct tcf_skbedit_params *params;
	struct tc_skbedit opt = {
		.index   = d->tcf_index,
		.refcnt  = refcount_read(&d->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
	};
	u64 pure_flags = 0;
	struct tcf_t t;

	spin_lock_bh(&d->tcf_lock);
	params = rcu_dereference_protected(d->params,
					   lockdep_is_held(&d->tcf_lock));
	opt.action = d->tcf_action;

	if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_PRIORITY) &&
	    nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, params->priority))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_QUEUE_MAPPING) &&
	    nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, params->queue_mapping))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_MARK) &&
	    nla_put_u32(skb, TCA_SKBEDIT_MARK, params->mark))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_PTYPE) &&
	    nla_put_u16(skb, TCA_SKBEDIT_PTYPE, params->ptype))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_MASK) &&
	    nla_put_u32(skb, TCA_SKBEDIT_MASK, params->mask))
		goto nla_put_failure;
	if (params->flags & SKBEDIT_F_INHERITDSFIELD)
		pure_flags |= SKBEDIT_F_INHERITDSFIELD;
	if (params->flags & SKBEDIT_F_TXQ_SKBHASH) {
		if (nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING_MAX,
				params->queue_mapping + params->mapping_mod - 1))
			goto nla_put_failure;

		pure_flags |= SKBEDIT_F_TXQ_SKBHASH;
	}
	if (pure_flags != 0 &&
	    nla_put(skb, TCA_SKBEDIT_FLAGS, sizeof(pure_flags), &pure_flags))
		goto nla_put_failure;

	tcf_tm_dump(&t, &d->tcf_tm);
	if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&d->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&d->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

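/* Teardown: free the parameter block after an RCU grace period. */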
static void tcf_skbedit_cleanup(struct tc_action *a)
{
	struct tcf_skbedit *d = to_skbedit(a);
	struct tcf_skbedit_params *params;

	params = rcu_dereference_protected(d->params, 1);
	if (params)
		kfree_rcu(params, rcu);
}

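/* Worst-case netlink message size for dumping one skbedit action. */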
static size_t tcf_skbedit_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_skbedit))
		+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_PRIORITY */
		+ nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING */
		+ nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING_MAX */
		+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MARK */
		+ nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_PTYPE */
		+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MASK */
		+ nla_total_size_64bit(sizeof(u64)); /* TCA_SKBEDIT_FLAGS */
}

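/*
 * Translate the action for hardware offload: on bind, fill in a
 * flow_action_entry for the offloadable options (mark, ptype, priority);
 * queue_mapping and inheritdsfield cannot be offloaded and are rejected.
 */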
static int tcf_skbedit_offload_act_setup(struct tc_action *act, void *entry_data,
					 u32 *index_inc, bool bind,
					 struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		if (is_tcf_skbedit_mark(act)) {
			entry->id = FLOW_ACTION_MARK;
			entry->mark = tcf_skbedit_mark(act);
		} else if (is_tcf_skbedit_ptype(act)) {
			entry->id = FLOW_ACTION_PTYPE;
			entry->ptype = tcf_skbedit_ptype(act);
		} else if (is_tcf_skbedit_priority(act)) {
			entry->id = FLOW_ACTION_PRIORITY;
			entry->priority = tcf_skbedit_priority(act);
		} else if (is_tcf_skbedit_queue_mapping(act)) {
			NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"queue_mapping\" option is used");
			return -EOPNOTSUPP;
		} else if (is_tcf_skbedit_inheritdsfield(act)) {
			NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"inheritdsfield\" option is used");
			return -EOPNOTSUPP;
		} else {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported skbedit option offload");
			return -EOPNOTSUPP;
		}
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		if (is_tcf_skbedit_mark(act))
			fl_action->id = FLOW_ACTION_MARK;
		else if (is_tcf_skbedit_ptype(act))
			fl_action->id = FLOW_ACTION_PTYPE;
		else if (is_tcf_skbedit_priority(act))
			fl_action->id = FLOW_ACTION_PRIORITY;
		else
			return -EOPNOTSUPP;
	}

	return 0;
}

static struct tc_action_ops act_skbedit_ops = {
	.kind		=	"skbedit",
	.id		=	TCA_ID_SKBEDIT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_skbedit_act,
	.stats_update	=	tcf_skbedit_stats_update,
	.dump		=	tcf_skbedit_dump,
	.init		=	tcf_skbedit_init,
	.cleanup	=	tcf_skbedit_cleanup,
	.get_fill_size	=	tcf_skbedit_get_fill_size,
	.offload_act_setup =	tcf_skbedit_offload_act_setup,
	.size		=	sizeof(struct tcf_skbedit),
};

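/* Per-netns state: each namespace gets its own action index. */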
static __net_init int skbedit_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_skbedit_ops.net_id);

	return tc_action_net_init(net, tn, &act_skbedit_ops);
}

static void __net_exit skbedit_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_skbedit_ops.net_id);
}

static struct pernet_operations skbedit_net_ops = {
	.init = skbedit_init_net,
	.exit_batch = skbedit_exit_net,
	.id   = &act_skbedit_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Alexander Duyck <alexander.h.duyck@intel.com>");
MODULE_DESCRIPTION("SKB Editing");
MODULE_LICENSE("GPL");

static int __init skbedit_init_module(void)
{
	return tcf_register_action(&act_skbedit_ops, &skbedit_net_ops);
}

static void __exit skbedit_cleanup_module(void)
{
	tcf_unregister_action(&act_skbedit_ops, &skbedit_net_ops);
}

module_init(skbedit_init_module);
module_exit(skbedit_cleanup_module);