// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright 2020 NXP */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gate.h>

static unsigned int gate_net_id;
static struct tc_action_ops act_gate_ops;

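/* Return the current time in the clock domain the schedule was configured
 * with. TK_OFFS_MAX doubles as the sentinel for CLOCK_MONOTONIC, which
 * needs no offset conversion.
 */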
static ktime_t gate_get_time(struct tcf_gate *gact)
{
	ktime_t mono = ktime_get();

	switch (gact->tk_offset) {
	case TK_OFFS_MAX:
		return mono;
	default:
		return ktime_mono_to_any(mono, gact->tk_offset);
	}

	return KTIME_MAX;
}

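/* Compute when the schedule should (re)start: the base time itself if it
 * is still in the future, otherwise the first cycle boundary after now.
 */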
static void gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
{
	struct tcf_gate_params *param = &gact->param;
	ktime_t now, base, cycle;
	u64 n;

	base = ns_to_ktime(param->tcfg_basetime);
	now = gate_get_time(gact);

	if (ktime_after(base, now)) {
		*start = base;
		return;
	}

	cycle = param->tcfg_cycletime;

	n = div64_u64(ktime_sub_ns(now, base), cycle);
	*start = ktime_add_ns(base, (n + 1) * cycle);
}

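/* Arm the schedule hrtimer for @start, unless an earlier expiry is
 * already programmed.
 */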
static void gate_start_timer(struct tcf_gate *gact, ktime_t start)
{
	ktime_t expires;

	expires = hrtimer_get_expires(&gact->hitimer);
	if (expires == 0)
		expires = KTIME_MAX;

	start = min_t(ktime_t, start, expires);

	hrtimer_start(&gact->hitimer, start, HRTIMER_MODE_ABS_SOFT);
}

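/* hrtimer callback: apply the pending gate entry (open/closed state and
 * octet budget), advance the close time by its interval, and queue up the
 * following entry, wrapping around at the end of the list. If the timer
 * has fallen behind, resynchronize to the next cycle boundary.
 */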
static enum hrtimer_restart gate_timer_func(struct hrtimer *timer)
{
	struct tcf_gate *gact = container_of(timer, struct tcf_gate,
					     hitimer);
	struct tcf_gate_params *p = &gact->param;
	struct tcfg_gate_entry *next;
	ktime_t close_time, now;

	spin_lock(&gact->tcf_lock);

	next = gact->next_entry;

	/* cycle start, clear pending bit, clear total octets */
	gact->current_gate_status = next->gate_state ? GATE_ACT_GATE_OPEN : 0;
	gact->current_entry_octets = 0;
	gact->current_max_octets = next->maxoctets;

	gact->current_close_time = ktime_add_ns(gact->current_close_time,
						next->interval);

	close_time = gact->current_close_time;

	if (list_is_last(&next->list, &p->entries))
		next = list_first_entry(&p->entries,
					struct tcfg_gate_entry, list);
	else
		next = list_next_entry(next, list);

	now = gate_get_time(gact);

	if (ktime_after(now, close_time)) {
		ktime_t cycle, base;
		u64 n;

		cycle = p->tcfg_cycletime;
		base = ns_to_ktime(p->tcfg_basetime);
		n = div64_u64(ktime_sub_ns(now, base), cycle);
		close_time = ktime_add_ns(base, (n + 1) * cycle);
	}

	gact->next_entry = next;

	hrtimer_set_expires(&gact->hitimer, close_time);

	spin_unlock(&gact->tcf_lock);

	return HRTIMER_RESTART;
}

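/* Per-packet hook: pass traffic while the gate is open and within the
 * current entry's octet budget, drop it otherwise. Until the base time
 * is reached (GATE_ACT_PENDING) all traffic passes unfiltered.
 */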
static int tcf_gate_act(struct sk_buff *skb, const struct tc_action *a,
			struct tcf_result *res)
{
	struct tcf_gate *gact = to_gate(a);

	spin_lock(&gact->tcf_lock);

	tcf_lastuse_update(&gact->tcf_tm);
	bstats_update(&gact->tcf_bstats, skb);

	if (unlikely(gact->current_gate_status & GATE_ACT_PENDING)) {
		spin_unlock(&gact->tcf_lock);
		return gact->tcf_action;
	}

	if (!(gact->current_gate_status & GATE_ACT_GATE_OPEN))
		goto drop;

	if (gact->current_max_octets >= 0) {
		gact->current_entry_octets += qdisc_pkt_len(skb);
		if (gact->current_entry_octets > gact->current_max_octets) {
			gact->tcf_qstats.overlimits++;
			goto drop;
		}
	}

	spin_unlock(&gact->tcf_lock);

	return gact->tcf_action;
drop:
	gact->tcf_qstats.drops++;
	spin_unlock(&gact->tcf_lock);

	return TC_ACT_SHOT;
}

static const struct nla_policy entry_policy[TCA_GATE_ENTRY_MAX + 1] = {
	[TCA_GATE_ENTRY_INDEX]		= { .type = NLA_U32 },
	[TCA_GATE_ENTRY_GATE]		= { .type = NLA_FLAG },
	[TCA_GATE_ENTRY_INTERVAL]	= { .type = NLA_U32 },
	[TCA_GATE_ENTRY_IPV]		= { .type = NLA_S32 },
	[TCA_GATE_ENTRY_MAX_OCTETS]	= { .type = NLA_S32 },
};

static const struct nla_policy gate_policy[TCA_GATE_MAX + 1] = {
	[TCA_GATE_PARMS]		=
		NLA_POLICY_EXACT_LEN(sizeof(struct tc_gate)),
	[TCA_GATE_PRIORITY]		= { .type = NLA_S32 },
	[TCA_GATE_ENTRY_LIST]		= { .type = NLA_NESTED },
	[TCA_GATE_BASE_TIME]		= { .type = NLA_U64 },
	[TCA_GATE_CYCLE_TIME]		= { .type = NLA_U64 },
	[TCA_GATE_CYCLE_TIME_EXT]	= { .type = NLA_U64 },
	[TCA_GATE_FLAGS]		= { .type = NLA_U32 },
	[TCA_GATE_CLOCKID]		= { .type = NLA_S32 },
};

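/* Fill one schedule entry from its parsed attributes. The interval is
 * mandatory and must be non-zero; IPV and max-octets default to -1
 * ("not specified").
 */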
static int fill_gate_entry(struct nlattr **tb, struct tcfg_gate_entry *entry,
			   struct netlink_ext_ack *extack)
{
	u32 interval = 0;

	entry->gate_state = nla_get_flag(tb[TCA_GATE_ENTRY_GATE]);

	if (tb[TCA_GATE_ENTRY_INTERVAL])
		interval = nla_get_u32(tb[TCA_GATE_ENTRY_INTERVAL]);

	if (interval == 0) {
		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
		return -EINVAL;
	}

	entry->interval = interval;

	if (tb[TCA_GATE_ENTRY_IPV])
		entry->ipv = nla_get_s32(tb[TCA_GATE_ENTRY_IPV]);
	else
		entry->ipv = -1;

	if (tb[TCA_GATE_ENTRY_MAX_OCTETS])
		entry->maxoctets = nla_get_s32(tb[TCA_GATE_ENTRY_MAX_OCTETS]);
	else
		entry->maxoctets = -1;

	return 0;
}

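/* Parse a single TCA_GATE_ONE_ENTRY nest into @entry. */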
static int parse_gate_entry(struct nlattr *n, struct tcfg_gate_entry *entry,
			    int index, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GATE_ENTRY_MAX + 1] = { };
	int err;

	err = nla_parse_nested(tb, TCA_GATE_ENTRY_MAX, n, entry_policy, extack);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	entry->index = index;

	return fill_gate_entry(tb, entry, extack);
}

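/* Free every schedule entry on @entries. */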
static void release_entry_list(struct list_head *entries)
{
	struct tcfg_gate_entry *entry, *e;

	list_for_each_entry_safe(entry, e, entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

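/* Parse the TCA_GATE_ENTRY_LIST nest into @sched->entries. Returns the
 * number of entries parsed, or a negative error after releasing any
 * partially built list.
 */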
static int parse_gate_list(struct nlattr *list_attr,
			   struct tcf_gate_params *sched,
			   struct netlink_ext_ack *extack)
{
	struct tcfg_gate_entry *entry;
	struct nlattr *n;
	int err, rem;
	int i = 0;

	if (!list_attr)
		return -EINVAL;

	nla_for_each_nested(n, list_attr, rem) {
		if (nla_type(n) != TCA_GATE_ONE_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute isn't type 'entry'");
			continue;
		}

		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			err = -ENOMEM;
			goto release_list;
		}

		err = parse_gate_entry(n, entry, i, extack);
		if (err < 0) {
			kfree(entry);
			goto release_list;
		}

		list_add_tail(&entry->list, &sched->entries);
		i++;
	}

	sched->num_entries = i;

	return i;

release_list:
	release_entry_list(&sched->entries);

	return err;
}

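/* (Re)initialize the schedule hrtimer. When reconfiguring an existing
 * action, the timer is only torn down if a timing parameter actually
 * changed; tcf_lock must be dropped around hrtimer_cancel() because the
 * timer handler takes the same lock.
 */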
static void gate_setup_timer(struct tcf_gate *gact, u64 basetime,
			     enum tk_offsets tko, s32 clockid,
			     bool do_init)
{
	if (!do_init) {
		if (basetime == gact->param.tcfg_basetime &&
		    tko == gact->tk_offset &&
		    clockid == gact->param.tcfg_clockid)
			return;

		spin_unlock_bh(&gact->tcf_lock);
		hrtimer_cancel(&gact->hitimer);
		spin_lock_bh(&gact->tcf_lock);
	}
	gact->param.tcfg_basetime = basetime;
	gact->param.tcfg_clockid = clockid;
	gact->tk_offset = tko;
	hrtimer_init(&gact->hitimer, clockid, HRTIMER_MODE_ABS_SOFT);
	gact->hitimer.function = gate_timer_func;
}

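/* Create a new gate action or update an existing one from netlink
 * attributes. The clock defaults to CLOCK_TAI when no TCA_GATE_CLOCKID
 * is given.
 */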
static int tcf_gate_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a,
			 int ovr, int bind, bool rtnl_held,
			 struct tcf_proto *tp, u32 flags,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, gate_net_id);
	enum tk_offsets tk_offset = TK_OFFS_TAI;
	struct nlattr *tb[TCA_GATE_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	u64 cycletime = 0, basetime = 0;
	struct tcf_gate_params *p;
	s32 clockid = CLOCK_TAI;
	struct tcf_gate *gact;
	struct tc_gate *parm;
	int ret = 0, err;
	u32 gflags = 0;
	s32 prio = -1;
	ktime_t start;
	u32 index;

	if (!nla)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GATE_MAX, nla, gate_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_GATE_PARMS])
		return -EINVAL;

	if (tb[TCA_GATE_CLOCKID]) {
		clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]);
		switch (clockid) {
		case CLOCK_REALTIME:
			tk_offset = TK_OFFS_REAL;
			break;
		case CLOCK_MONOTONIC:
			tk_offset = TK_OFFS_MAX;
			break;
		case CLOCK_BOOTTIME:
			tk_offset = TK_OFFS_BOOT;
			break;
		case CLOCK_TAI:
			tk_offset = TK_OFFS_TAI;
			break;
		default:
			NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
			return -EINVAL;
		}
	}

	parm = nla_data(tb[TCA_GATE_PARMS]);
	index = parm->index;

	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (err && bind)
		return 0;

	if (!err) {
		ret = tcf_idr_create(tn, index, est, a,
				     &act_gate_ops, bind, false, 0);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		ret = ACT_P_CREATED;
	} else if (!ovr) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	if (tb[TCA_GATE_PRIORITY])
		prio = nla_get_s32(tb[TCA_GATE_PRIORITY]);

	if (tb[TCA_GATE_BASE_TIME])
		basetime = nla_get_u64(tb[TCA_GATE_BASE_TIME]);

	if (tb[TCA_GATE_FLAGS])
		gflags = nla_get_u32(tb[TCA_GATE_FLAGS]);

	gact = to_gate(*a);
	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&gact->param.entries);

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	spin_lock_bh(&gact->tcf_lock);
	p = &gact->param;

	if (tb[TCA_GATE_CYCLE_TIME])
		cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]);

	if (tb[TCA_GATE_ENTRY_LIST]) {
		err = parse_gate_list(tb[TCA_GATE_ENTRY_LIST], p, extack);
		if (err < 0)
			goto chain_put;
	}

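	/* If no cycle time was specified, derive it from the sum of the
	 * entry intervals; an empty or zero-length schedule is invalid.
	 */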
	if (!cycletime) {
		struct tcfg_gate_entry *entry;
		ktime_t cycle = 0;

		list_for_each_entry(entry, &p->entries, list)
			cycle = ktime_add_ns(cycle, entry->interval);
		cycletime = cycle;
		if (!cycletime) {
			err = -EINVAL;
			goto chain_put;
		}
	}
	p->tcfg_cycletime = cycletime;

	if (tb[TCA_GATE_CYCLE_TIME_EXT])
		p->tcfg_cycletime_ext =
			nla_get_u64(tb[TCA_GATE_CYCLE_TIME_EXT]);

	gate_setup_timer(gact, basetime, tk_offset, clockid,
			 ret == ACT_P_CREATED);
	p->tcfg_priority = prio;
	p->tcfg_flags = gflags;
	gate_get_start_time(gact, &start);

	gact->current_close_time = start;
	gact->current_gate_status = GATE_ACT_GATE_OPEN | GATE_ACT_PENDING;

	gact->next_entry = list_first_entry(&p->entries,
					    struct tcfg_gate_entry, list);

	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);

	gate_start_timer(gact, start);

	spin_unlock_bh(&gact->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	return ret;

chain_put:
	spin_unlock_bh(&gact->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	/* action is not inserted in any list: it's safe to init hitimer
	 * without taking tcf_lock.
	 */
	if (ret == ACT_P_CREATED)
		gate_setup_timer(gact, gact->param.tcfg_basetime,
				 gact->tk_offset, gact->param.tcfg_clockid,
				 true);
	tcf_idr_release(*a, bind);
	return err;
}

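/* Cancel the schedule timer and free the entry list when the action is
 * destroyed.
 */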
static void tcf_gate_cleanup(struct tc_action *a)
{
	struct tcf_gate *gact = to_gate(a);
	struct tcf_gate_params *p;

	p = &gact->param;
	hrtimer_cancel(&gact->hitimer);
	release_entry_list(&p->entries);
}

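/* Dump one schedule entry as a TCA_GATE_ONE_ENTRY nest. */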
static int dumping_entry(struct sk_buff *skb,
			 struct tcfg_gate_entry *entry)
{
	struct nlattr *item;

	item = nla_nest_start_noflag(skb, TCA_GATE_ONE_ENTRY);
	if (!item)
		return -ENOSPC;

	if (nla_put_u32(skb, TCA_GATE_ENTRY_INDEX, entry->index))
		goto nla_put_failure;

	if (entry->gate_state && nla_put_flag(skb, TCA_GATE_ENTRY_GATE))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GATE_ENTRY_INTERVAL, entry->interval))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_ENTRY_MAX_OCTETS, entry->maxoctets))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_ENTRY_IPV, entry->ipv))
		goto nla_put_failure;

	return nla_nest_end(skb, item);

nla_put_failure:
	nla_nest_cancel(skb, item);
	return -1;
}

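/* Dump the action parameters and the full schedule back to user space. */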
static int tcf_gate_dump(struct sk_buff *skb, struct tc_action *a,
			 int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_gate *gact = to_gate(a);
	struct tc_gate opt = {
		.index    = gact->tcf_index,
		.refcnt   = refcount_read(&gact->tcf_refcnt) - ref,
		.bindcnt  = atomic_read(&gact->tcf_bindcnt) - bind,
	};
	struct tcfg_gate_entry *entry;
	struct tcf_gate_params *p;
	struct nlattr *entry_list;
	struct tcf_t t;

	spin_lock_bh(&gact->tcf_lock);
	opt.action = gact->tcf_action;

	p = &gact->param;

	if (nla_put(skb, TCA_GATE_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_BASE_TIME,
			      p->tcfg_basetime, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME,
			      p->tcfg_cycletime, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME_EXT,
			      p->tcfg_cycletime_ext, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_CLOCKID, p->tcfg_clockid))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GATE_FLAGS, p->tcfg_flags))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_PRIORITY, p->tcfg_priority))
		goto nla_put_failure;

	entry_list = nla_nest_start_noflag(skb, TCA_GATE_ENTRY_LIST);
	if (!entry_list)
		goto nla_put_failure;

	list_for_each_entry(entry, &p->entries, list) {
		if (dumping_entry(skb, entry) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, entry_list);

	tcf_tm_dump(&t, &gact->tcf_tm);
	if (nla_put_64bit(skb, TCA_GATE_TM, sizeof(t), &t, TCA_GATE_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&gact->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&gact->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_gate_walker(struct net *net, struct sk_buff *skb,
			   struct netlink_callback *cb, int type,
			   const struct tc_action_ops *ops,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, gate_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static void tcf_gate_stats_update(struct tc_action *a, u64 bytes, u64 packets,
				  u64 drops, u64 lastuse, bool hw)
{
	struct tcf_gate *gact = to_gate(a);
	struct tcf_t *tm = &gact->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static int tcf_gate_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, gate_net_id);

	return tcf_idr_search(tn, a, index);
}

static size_t tcf_gate_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_gate));
}

static struct tc_action_ops act_gate_ops = {
	.kind		=	"gate",
	.id		=	TCA_ID_GATE,
	.owner		=	THIS_MODULE,
	.act		=	tcf_gate_act,
	.dump		=	tcf_gate_dump,
	.init		=	tcf_gate_init,
	.cleanup	=	tcf_gate_cleanup,
	.walk		=	tcf_gate_walker,
	.stats_update	=	tcf_gate_stats_update,
	.get_fill_size	=	tcf_gate_get_fill_size,
	.lookup		=	tcf_gate_search,
	.size		=	sizeof(struct tcf_gate),
};

static __net_init int gate_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, gate_net_id);

	return tc_action_net_init(net, tn, &act_gate_ops);
}

static void __net_exit gate_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, gate_net_id);
}

static struct pernet_operations gate_net_ops = {
	.init = gate_init_net,
	.exit_batch = gate_exit_net,
	.id   = &gate_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init gate_init_module(void)
{
	return tcf_register_action(&act_gate_ops, &gate_net_ops);
}

static void __exit gate_cleanup_module(void)
{
	tcf_unregister_action(&act_gate_ops, &gate_net_ops);
}

module_init(gate_init_module);
module_exit(gate_cleanup_module);
MODULE_LICENSE("GPL v2");