/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * net/sched/cls_rsvp.h	Template file for RSVPv[46] classifiers.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

/*
   Compared to the general packet classification problem,
   RSVP needs only several relatively simple rules:

   * (dst, protocol) are always specified,
     so that we are able to hash them.
   * src may be exact, or may be wildcard, so that
     we can keep a hash table plus one wildcard entry.
   * source port (or flow label) is important only if src is given.

   IMPLEMENTATION.

   We use a two level hash table: the top level is keyed by
   destination address and protocol ID; every bucket contains a list
   of "rsvp sessions", identified by destination address, protocol and
   DPI (= "Destination Port ID"): triple (key, mask, offset).

   Every session has a smaller hash table keyed by source address
   (cf. RSVP flowspec) and one wildcard entry for wildcard reservations.
   Every bucket of that table is again a list of "RSVP flows", selected by
   source address and SPI (= "Source Port ID" here rather than
   "security parameter index"): triple (key, mask, offset).
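
   (Illustrative walkthrough, with made-up values rather than anything
   taken from this file: a UDP packet to 10.0.0.1 hashes
   (dst = 10.0.0.1, protocol = 17, tunnelid = 0) into the top-level
   table; every session in that bucket is compared on exact dst,
   protocol and tunnelid and on its DPI (key, mask, offset) applied to
   the transport header; on success, hashing the source address picks
   a filter chain, whose entries are compared on exact src and SPI,
   with the extra wildcard slot tried last.)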


   NOTE 1. All the packets with IPv6 extension headers (except AH and ESP)
   and all fragmented packets go to the best-effort traffic class.


   NOTE 2. Two "port id"s seem to be redundant; RFC 2207 requires
   only one "Generalized Port Identifier". So for classic
   AH, ESP (and UDP, TCP) both *PIs should coincide, or one of them
   should be wildcard.

   At first sight, this redundancy is just a waste of CPU
   resources. But DPI and SPI add the possibility of assigning different
   priorities to GPIs. See also note 4 about tunnels below.
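
   For illustration only (the numbers are made up): with UDP the
   transport header starts with the 16-bit source and destination
   ports, so a GPI matching only destination port 5001 could be the
   triple { key = htonl(5001), mask = htonl(0x0000FFFF), offset = 0 };
   the classifier then tests
   !((*(u32 *)(xprt + offset) ^ key) & mask).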


   NOTE 3. One complication is the case of tunneled packets.
   We implement it as follows: if the first lookup
   matches a special session with a non-zero "tunnelhdr" value,
   flowid doesn't contain the true flow ID, but the tunnel ID (1...255).
   In this case, we pull tunnelhdr bytes and restart the lookup
   with the tunnel ID added to the list of keys. Simple and stupid 8)8)
   It's enough for PIMREG and IPIP.
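
   (Again purely illustrative: for IPIP the filter matching the outer
   header would use a tunnelhdr of sizeof(struct iphdr), i.e. 20 bytes,
   so that after the match tunnelid is taken from that filter's
   classid, nhptr is advanced to the inner IPv4 header, and the lookup
   restarts against sessions configured with that tunnelid.)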


   NOTE 4. Two GPIs make it possible to parse even GRE packets.
   E.g. DPI can select ETH_P_IP (and the necessary flags to make
   tunnelhdr correct) in the GRE protocol field, and SPI matches
   the GRE key. Is it not nice? 8)8)
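
   (Sketch of NOTE 4 with assumed values: the basic GRE header is
   16 bits of flags/version followed by a 16-bit protocol field, and
   the optional 32-bit key follows immediately when no checksum is
   present; so DPI could be { key = htonl(ETH_P_IP),
   mask = htonl(0x0000FFFF), offset = 0 } and SPI
   { key = the GRE key, mask = htonl(0xFFFFFFFF), offset = 4 }.)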


   Well, as a result, despite its simplicity, we get a pretty
   powerful classification engine.  */


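/* Rough usage sketch (iproute2's rsvp filter syntax from memory, with a
 * made-up device, classid and addresses; consult "tc filter add ... rsvp help"
 * for the authoritative option names):
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 rsvp \
 *		ipproto udp session 10.1.1.1/5001 sender 10.2.2.2/5002 \
 *		classid 1:1
 */
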
struct rsvp_head {
	u32 tmap[256/32];
	u32 hgenerator;
	u8 tgenerator;
	struct rsvp_session __rcu *ht[256];
	struct rcu_head rcu;
};

struct rsvp_session {
	struct rsvp_session __rcu *next;
	__be32 dst[RSVP_DST_LEN];
	struct tc_rsvp_gpi dpi;
	u8 protocol;
	u8 tunnelid;
	/* 16 (src,sport) hash slots, and one wildcard source slot */
	struct rsvp_filter __rcu *ht[16 + 1];
	struct rcu_head rcu;
};


struct rsvp_filter {
	struct rsvp_filter __rcu *next;
	__be32 src[RSVP_DST_LEN];
	struct tc_rsvp_gpi spi;
	u8 tunnelhdr;

	struct tcf_result res;
	struct tcf_exts exts;

	u32 handle;
	struct rsvp_session *sess;
	struct rcu_work rwork;
};

static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
{
	unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1];

	h ^= h>>16;
	h ^= h>>8;
	return (h ^ protocol ^ tunnelid) & 0xFF;
}

static inline unsigned int hash_src(__be32 *src)
{
	unsigned int h = (__force __u32)src[RSVP_DST_LEN-1];

	h ^= h>>16;
	h ^= h>>8;
	h ^= h>>4;
	return h & 0xF;
}

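/* Run the filter's actions: a negative result means "skip this filter
 * and keep searching", a positive result is returned to the caller as
 * the verdict, and zero falls through with *res already set.
 */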
#define RSVP_APPLY_RESULT()				\
{							\
	int r = tcf_exts_exec(skb, &f->exts, res);	\
	if (r < 0)					\
		continue;				\
	else if (r > 0)					\
		return r;				\
}

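/* Returns 0 with *res filled in on a match, a positive verdict from
 * RSVP_APPLY_RESULT(), or -1 when the headers cannot be pulled or
 * nothing matches.
 */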
static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct rsvp_head *head = rcu_dereference_bh(tp->root);
	struct rsvp_session *s;
	struct rsvp_filter *f;
	unsigned int h1, h2;
	__be32 *dst, *src;
	u8 protocol;
	u8 tunnelid = 0;
	u8 *xprt;
#if RSVP_DST_LEN == 4
	struct ipv6hdr *nhptr;

	if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
		return -1;
	nhptr = ipv6_hdr(skb);
#else
	struct iphdr *nhptr;

	if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
		return -1;
	nhptr = ip_hdr(skb);
#endif
restart:

#if RSVP_DST_LEN == 4
	src = &nhptr->saddr.s6_addr32[0];
	dst = &nhptr->daddr.s6_addr32[0];
	protocol = nhptr->nexthdr;
	xprt = ((u8 *)nhptr) + sizeof(struct ipv6hdr);
#else
	src = &nhptr->saddr;
	dst = &nhptr->daddr;
	protocol = nhptr->protocol;
	xprt = ((u8 *)nhptr) + (nhptr->ihl<<2);
	if (ip_is_fragment(nhptr))
		return -1;
#endif

	h1 = hash_dst(dst, protocol, tunnelid);
	h2 = hash_src(src);

	for (s = rcu_dereference_bh(head->ht[h1]); s;
	     s = rcu_dereference_bh(s->next)) {
		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] &&
		    protocol == s->protocol &&
		    !(s->dpi.mask &
		      (*(u32 *)(xprt + s->dpi.offset) ^ s->dpi.key)) &&
#if RSVP_DST_LEN == 4
		    dst[0] == s->dst[0] &&
		    dst[1] == s->dst[1] &&
		    dst[2] == s->dst[2] &&
#endif
		    tunnelid == s->tunnelid) {

			for (f = rcu_dereference_bh(s->ht[h2]); f;
			     f = rcu_dereference_bh(f->next)) {
				if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] &&
				    !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key))
#if RSVP_DST_LEN == 4
				    &&
				    src[0] == f->src[0] &&
				    src[1] == f->src[1] &&
				    src[2] == f->src[2]
#endif
				    ) {
					*res = f->res;
					RSVP_APPLY_RESULT();

matched:
					if (f->tunnelhdr == 0)
						return 0;

					tunnelid = f->res.classid;
					nhptr = (void *)(xprt + f->tunnelhdr - sizeof(*nhptr));
					goto restart;
				}
			}

			/* And wildcard bucket... */
			for (f = rcu_dereference_bh(s->ht[16]); f;
			     f = rcu_dereference_bh(f->next)) {
				*res = f->res;
				RSVP_APPLY_RESULT();
				goto matched;
			}
			return -1;
		}
	}
	return -1;
}

static void rsvp_replace(struct tcf_proto *tp, struct rsvp_filter *n, u32 h)
{
	struct rsvp_head *head = rtnl_dereference(tp->root);
	struct rsvp_session *s;
	struct rsvp_filter __rcu **ins;
	struct rsvp_filter *pins;
	unsigned int h1 = h & 0xFF;
	unsigned int h2 = (h >> 8) & 0xFF;

	for (s = rtnl_dereference(head->ht[h1]); s;
	     s = rtnl_dereference(s->next)) {
		for (ins = &s->ht[h2], pins = rtnl_dereference(*ins); ;
		     ins = &pins->next, pins = rtnl_dereference(*ins)) {
			if (pins->handle == h) {
				RCU_INIT_POINTER(n->next, pins->next);
				rcu_assign_pointer(*ins, n);
				return;
			}
		}
	}

	/* Something went wrong if we are trying to replace a non-existent
	 * node. Might as well halt instead of silently failing.
	 */
	BUG_ON(1);
}

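/* Filter handles encode where the filter lives: bits 0-7 are the
 * destination hash bucket, bits 8-15 the source hash slot (16 means
 * the wildcard slot) and bits 16-31 come from hgenerator, see
 * gen_handle() below.
 */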
static void *rsvp_get(struct tcf_proto *tp, u32 handle)
{
	struct rsvp_head *head = rtnl_dereference(tp->root);
	struct rsvp_session *s;
	struct rsvp_filter *f;
	unsigned int h1 = handle & 0xFF;
	unsigned int h2 = (handle >> 8) & 0xFF;

	if (h2 > 16)
		return NULL;

	for (s = rtnl_dereference(head->ht[h1]); s;
	     s = rtnl_dereference(s->next)) {
		for (f = rtnl_dereference(s->ht[h2]); f;
		     f = rtnl_dereference(f->next)) {
			if (f->handle == handle)
				return f;
		}
	}
	return NULL;
}

static int rsvp_init(struct tcf_proto *tp)
{
	struct rsvp_head *data;

	data = kzalloc(sizeof(struct rsvp_head), GFP_KERNEL);
	if (data) {
		rcu_assign_pointer(tp->root, data);
		return 0;
	}
	return -ENOBUFS;
}

static void __rsvp_delete_filter(struct rsvp_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void rsvp_delete_filter_work(struct work_struct *work)
{
	struct rsvp_filter *f = container_of(to_rcu_work(work),
					     struct rsvp_filter,
					     rwork);
	rtnl_lock();
	__rsvp_delete_filter(f);
	rtnl_unlock();
}

static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	/* all classifiers are required to call tcf_exts_destroy() after rcu
	 * grace period, since converted-to-rcu actions are relying on that
	 * in cleanup() callback
	 */
	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, rsvp_delete_filter_work);
	else
		__rsvp_delete_filter(f);
}

static void rsvp_destroy(struct tcf_proto *tp, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct rsvp_head *data = rtnl_dereference(tp->root);
	int h1, h2;

	if (data == NULL)
		return;

	for (h1 = 0; h1 < 256; h1++) {
		struct rsvp_session *s;

		while ((s = rtnl_dereference(data->ht[h1])) != NULL) {
			RCU_INIT_POINTER(data->ht[h1], s->next);

			for (h2 = 0; h2 <= 16; h2++) {
				struct rsvp_filter *f;

				while ((f = rtnl_dereference(s->ht[h2])) != NULL) {
					rcu_assign_pointer(s->ht[h2], f->next);
					rsvp_delete_filter(tp, f);
				}
			}
			kfree_rcu(s, rcu);
		}
	}
	kfree_rcu(data, rcu);
}

static int rsvp_delete(struct tcf_proto *tp, void *arg, bool *last,
		       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct rsvp_head *head = rtnl_dereference(tp->root);
	struct rsvp_filter *nfp, *f = arg;
	struct rsvp_filter __rcu **fp;
	unsigned int h = f->handle;
	struct rsvp_session __rcu **sp;
	struct rsvp_session *nsp, *s = f->sess;
	int i, h1;

	fp = &s->ht[(h >> 8) & 0xFF];
	for (nfp = rtnl_dereference(*fp); nfp;
	     fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
		if (nfp == f) {
			RCU_INIT_POINTER(*fp, f->next);
			rsvp_delete_filter(tp, f);

			/* Strip tree */

			for (i = 0; i <= 16; i++)
				if (s->ht[i])
					goto out;

			/* OK, session has no flows */
			sp = &head->ht[h & 0xFF];
			for (nsp = rtnl_dereference(*sp); nsp;
			     sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
				if (nsp == s) {
					RCU_INIT_POINTER(*sp, s->next);
					kfree_rcu(s, rcu);
					goto out;
				}
			}

			break;
		}
	}

out:
	*last = true;
	for (h1 = 0; h1 < 256; h1++) {
		if (rcu_access_pointer(head->ht[h1])) {
			*last = false;
			break;
		}
	}

	return 0;
}

static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt)
{
	struct rsvp_head *data = rtnl_dereference(tp->root);
	int i = 0xFFFF;

	while (i-- > 0) {
		u32 h;

		if ((data->hgenerator += 0x10000) == 0)
			data->hgenerator = 0x10000;
		h = data->hgenerator|salt;
		if (!rsvp_get(tp, h))
			return h;
	}
	return 0;
}

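/* Tunnel IDs (1..255) are allocated from tgenerator, with tmap acting
 * as a bitmap of IDs believed to be in use: tunnel_bts() tests and sets
 * the bit for the current tgenerator, tunnel_recycle() is meant to
 * rebuild the map from the filters that still reference a tunnel, and
 * gen_tunnel() searches for a free ID, recycling once before giving up.
 */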
static int tunnel_bts(struct rsvp_head *data)
{
	int n = data->tgenerator >> 5;
	u32 b = 1 << (data->tgenerator & 0x1F);

	if (data->tmap[n] & b)
		return 0;
	data->tmap[n] |= b;
	return 1;
}

static void tunnel_recycle(struct rsvp_head *data)
{
	struct rsvp_session __rcu **sht = data->ht;
	u32 tmap[256/32];
	int h1, h2;

	memset(tmap, 0, sizeof(tmap));

	for (h1 = 0; h1 < 256; h1++) {
		struct rsvp_session *s;
		for (s = rtnl_dereference(sht[h1]); s;
		     s = rtnl_dereference(s->next)) {
			for (h2 = 0; h2 <= 16; h2++) {
				struct rsvp_filter *f;

				for (f = rtnl_dereference(s->ht[h2]); f;
				     f = rtnl_dereference(f->next)) {
					if (f->tunnelhdr == 0)
						continue;
					data->tgenerator = f->res.classid;
					tunnel_bts(data);
				}
			}
		}
	}

	memcpy(data->tmap, tmap, sizeof(tmap));
}

static u32 gen_tunnel(struct rsvp_head *data)
{
	int i, k;

	for (k = 0; k < 2; k++) {
		for (i = 255; i > 0; i--) {
			if (++data->tgenerator == 0)
				data->tgenerator = 1;
			if (tunnel_bts(data))
				return data->tgenerator;
		}
		tunnel_recycle(data);
	}
	return 0;
}

static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = {
	[TCA_RSVP_CLASSID]	= { .type = NLA_U32 },
	[TCA_RSVP_DST]		= { .len = RSVP_DST_LEN * sizeof(u32) },
	[TCA_RSVP_SRC]		= { .len = RSVP_DST_LEN * sizeof(u32) },
	[TCA_RSVP_PINFO]	= { .len = sizeof(struct tc_rsvp_pinfo) },
};

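/* Change/create entry point. When *arg already points at a filter,
 * only its classid and actions are replaced (via a copy handed to
 * rsvp_replace()). Otherwise a new filter is allocated, a handle is
 * generated from the two hash values, and the filter is linked into a
 * matching session (created on demand), roughly ordered so that
 * entries whose SPI mask covers the new one stay ahead of it.
 */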
static int rsvp_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle,
		       struct nlattr **tca,
		       void **arg, bool ovr, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct rsvp_head *data = rtnl_dereference(tp->root);
	struct rsvp_filter *f, *nfp;
	struct rsvp_filter __rcu **fp;
	struct rsvp_session *nsp, *s;
	struct rsvp_session __rcu **sp;
	struct tc_rsvp_pinfo *pinfo = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_RSVP_MAX + 1];
	struct tcf_exts e;
	unsigned int h1, h2;
	__be32 *dst;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested_deprecated(tb, TCA_RSVP_MAX, opt, rsvp_policy,
					  NULL);
	if (err < 0)
		return err;

	err = tcf_exts_init(&e, net, TCA_RSVP_ACT, TCA_RSVP_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr, true,
				extack);
	if (err < 0)
		goto errout2;

	f = *arg;
	if (f) {
		/* Node exists: adjust only classid */
		struct rsvp_filter *n;

		if (f->handle != handle && handle)
			goto errout2;

		n = kmemdup(f, sizeof(*f), GFP_KERNEL);
		if (!n) {
			err = -ENOMEM;
			goto errout2;
		}

		err = tcf_exts_init(&n->exts, net, TCA_RSVP_ACT,
				    TCA_RSVP_POLICE);
		if (err < 0) {
			kfree(n);
			goto errout2;
		}

		if (tb[TCA_RSVP_CLASSID]) {
			n->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
			tcf_bind_filter(tp, &n->res, base);
		}

		tcf_exts_change(&n->exts, &e);
		rsvp_replace(tp, n, handle);
		return 0;
	}

	/* Now the more serious part... */
	err = -EINVAL;
	if (handle)
		goto errout2;
	if (tb[TCA_RSVP_DST] == NULL)
		goto errout2;

	err = -ENOBUFS;
	f = kzalloc(sizeof(struct rsvp_filter), GFP_KERNEL);
	if (f == NULL)
		goto errout2;

	err = tcf_exts_init(&f->exts, net, TCA_RSVP_ACT, TCA_RSVP_POLICE);
	if (err < 0)
		goto errout;
	h2 = 16;
	if (tb[TCA_RSVP_SRC]) {
		memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));
		h2 = hash_src(f->src);
	}
	if (tb[TCA_RSVP_PINFO]) {
		pinfo = nla_data(tb[TCA_RSVP_PINFO]);
		f->spi = pinfo->spi;
		f->tunnelhdr = pinfo->tunnelhdr;
	}
	if (tb[TCA_RSVP_CLASSID])
		f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);

	dst = nla_data(tb[TCA_RSVP_DST]);
	h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0);

	err = -ENOMEM;
	if ((f->handle = gen_handle(tp, h1 | (h2<<8))) == 0)
		goto errout;

	if (f->tunnelhdr) {
		err = -EINVAL;
		if (f->res.classid > 255)
			goto errout;

		err = -ENOMEM;
		if (f->res.classid == 0 &&
		    (f->res.classid = gen_tunnel(data)) == 0)
			goto errout;
	}

	for (sp = &data->ht[h1];
	     (s = rtnl_dereference(*sp)) != NULL;
	     sp = &s->next) {
		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
		    pinfo && pinfo->protocol == s->protocol &&
		    memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
#if RSVP_DST_LEN == 4
		    dst[0] == s->dst[0] &&
		    dst[1] == s->dst[1] &&
		    dst[2] == s->dst[2] &&
#endif
		    pinfo->tunnelid == s->tunnelid) {

insert:
			/* OK, we found an appropriate session */

			fp = &s->ht[h2];

			f->sess = s;
			if (f->tunnelhdr == 0)
				tcf_bind_filter(tp, &f->res, base);

			tcf_exts_change(&f->exts, &e);

			fp = &s->ht[h2];
			for (nfp = rtnl_dereference(*fp); nfp;
			     fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
				__u32 mask = nfp->spi.mask & f->spi.mask;

				if (mask != f->spi.mask)
					break;
			}
			RCU_INIT_POINTER(f->next, nfp);
			rcu_assign_pointer(*fp, f);

			*arg = f;
			return 0;
		}
	}

	/* No session found. Create a new one. */

	err = -ENOBUFS;
	s = kzalloc(sizeof(struct rsvp_session), GFP_KERNEL);
	if (s == NULL)
		goto errout;
	memcpy(s->dst, dst, sizeof(s->dst));

	if (pinfo) {
		s->dpi = pinfo->dpi;
		s->protocol = pinfo->protocol;
		s->tunnelid = pinfo->tunnelid;
	}
	sp = &data->ht[h1];
	for (nsp = rtnl_dereference(*sp); nsp;
	     sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
		if ((nsp->dpi.mask & s->dpi.mask) != s->dpi.mask)
			break;
	}
	RCU_INIT_POINTER(s->next, nsp);
	rcu_assign_pointer(*sp, s);

	goto insert;

errout:
	tcf_exts_destroy(&f->exts);
	kfree(f);
errout2:
	tcf_exts_destroy(&e);
	return err;
}

static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		      bool rtnl_held)
{
	struct rsvp_head *head = rtnl_dereference(tp->root);
	unsigned int h, h1;

	if (arg->stop)
		return;

	for (h = 0; h < 256; h++) {
		struct rsvp_session *s;

		for (s = rtnl_dereference(head->ht[h]); s;
		     s = rtnl_dereference(s->next)) {
			for (h1 = 0; h1 <= 16; h1++) {
				struct rsvp_filter *f;

				for (f = rtnl_dereference(s->ht[h1]); f;
				     f = rtnl_dereference(f->next)) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}

static int rsvp_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct rsvp_filter *f = fh;
	struct rsvp_session *s;
	struct nlattr *nest;
	struct tc_rsvp_pinfo pinfo;

	if (f == NULL)
		return skb->len;
	s = f->sess;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst))
		goto nla_put_failure;
	pinfo.dpi = s->dpi;
	pinfo.spi = f->spi;
	pinfo.protocol = s->protocol;
	pinfo.tunnelid = s->tunnelid;
	pinfo.tunnelhdr = f->tunnelhdr;
	pinfo.pad = 0;
	if (nla_put(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo))
		goto nla_put_failure;
	if (f->res.classid &&
	    nla_put_u32(skb, TCA_RSVP_CLASSID, f->res.classid))
		goto nla_put_failure;
	if (((f->handle >> 8) & 0xFF) != 16 &&
	    nla_put(skb, TCA_RSVP_SRC, sizeof(f->src), f->src))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void rsvp_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			    unsigned long base)
{
	struct rsvp_filter *f = fh;

	if (f && f->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &f->res, base);
		else
			__tcf_unbind_filter(q, &f->res);
	}
}

static struct tcf_proto_ops RSVP_OPS __read_mostly = {
	.kind		= RSVP_ID,
	.classify	= rsvp_classify,
	.init		= rsvp_init,
	.destroy	= rsvp_destroy,
	.get		= rsvp_get,
	.change		= rsvp_change,
	.delete		= rsvp_delete,
	.walk		= rsvp_walk,
	.dump		= rsvp_dump,
	.bind_class	= rsvp_bind_class,
	.owner		= THIS_MODULE,
};

static int __init init_rsvp(void)
{
	return register_tcf_proto_ops(&RSVP_OPS);
}

static void __exit exit_rsvp(void)
{
	unregister_tcf_proto_ops(&RSVP_OPS);
}

module_init(init_rsvp)
module_exit(exit_rsvp)