/*
 * net/sched/cls_rsvp.h	Template file for RSVPv[46] classifiers.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

/*
   Compared to the general packet classification problem,
   RSVP needs only several relatively simple rules:

   * (dst, protocol) are always specified,
     so that we are able to hash them.
   * src may be exact, or may be wildcard, so that
     we can keep a hash table plus one wildcard entry.
   * source port (or flow label) is important only if src is given.

   IMPLEMENTATION.

   We use a two-level hash table: the top level is keyed by
   destination address and protocol ID; every bucket contains a list
   of "rsvp sessions", identified by destination address, protocol and
   DPI (= "Destination Port ID"): triple (key, mask, offset).

   Every session has a smaller hash table keyed by source address
   (cf. RSVP flowspec) and one wildcard entry for wildcard reservations.
   Every bucket is again a list of "RSVP flows", selected by
   source address and SPI (= "Source Port ID" here rather than
   "security parameter index"): triple (key, mask, offset).


   NOTE 1. All packets with IPv6 extension headers (except AH and ESP)
   and all fragmented packets go to the best-effort traffic class.


   NOTE 2. Two "port id"s seem to be redundant; RFC 2207 requires
   only one "Generalized Port Identifier". So for classic
   AH, ESP (and UDP, TCP) both *pi should coincide, or one of them
   should be a wildcard.

   At first sight, this redundancy is just a waste of CPU
   resources. But DPI and SPI make it possible to assign different
   priorities to GPIs. See also note 4 about tunnels below.


   NOTE 3. One complication is the case of tunneled packets.
   We implement it as follows: if the first lookup
   matches a special session with a non-zero "tunnelhdr" value,
   the flowid doesn't contain the true flow ID, but the tunnel ID (1...255).
   In this case, we pull tunnelhdr bytes and restart the lookup
   with the tunnel ID added to the list of keys. Simple and stupid 8)8)
   It's enough for PIMREG and IPIP.


   NOTE 4. Two GPIs make it possible to parse even GRE packets.
   E.g. DPI can select ETH_P_IP (and the necessary flags to make
   tunnelhdr correct) in the GRE protocol field, and SPI matches the
   GRE key. Isn't it nice? 8)8)


   Well, as a result, despite its simplicity, we get a pretty
   powerful classification engine.  */


struct rsvp_head {
	u32 tmap[256/32];
	u32 hgenerator;
	u8 tgenerator;
	struct rsvp_session __rcu *ht[256];
	struct rcu_head rcu;
};

struct rsvp_session {
	struct rsvp_session __rcu *next;
	__be32 dst[RSVP_DST_LEN];
	struct tc_rsvp_gpi dpi;
	u8 protocol;
	u8 tunnelid;
	/* 16 (src,sport) hash slots, and one wildcard source slot */
	struct rsvp_filter __rcu *ht[16 + 1];
	struct rcu_head rcu;
};


struct rsvp_filter {
	struct rsvp_filter __rcu *next;
	__be32 src[RSVP_DST_LEN];
	struct tc_rsvp_gpi spi;
	u8 tunnelhdr;

	struct tcf_result res;
	struct tcf_exts exts;

	u32 handle;
	struct rsvp_session *sess;
	struct rcu_head rcu;
};

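/* The destination hash folds the last 32 bits of the address and XORs in
 * the protocol and tunnel ID, giving an 8-bit index into the 256 session
 * buckets.  The source hash folds down to 4 bits, selecting one of the 16
 * per-session slots; slot 16 is reserved for wildcard-source filters.
 */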
static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
{
	unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1];

	h ^= h>>16;
	h ^= h>>8;
	return (h ^ protocol ^ tunnelid) & 0xFF;
}

static inline unsigned int hash_src(__be32 *src)
{
	unsigned int h = (__force __u32)src[RSVP_DST_LEN-1];

	h ^= h>>16;
	h ^= h>>8;
	h ^= h>>4;
	return h & 0xF;
}

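/* Run the extensions (actions) attached to the matched filter: a negative
 * result means "skip this filter and keep looking", a positive result is
 * returned to the caller as the classification verdict, and zero falls
 * through to the normal match handling below.
 */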
#define RSVP_APPLY_RESULT()				\
{							\
	int r = tcf_exts_exec(skb, &f->exts, res);	\
	if (r < 0)					\
		continue;				\
	else if (r > 0)					\
		return r;				\
}

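/* Lookup order: find the session by (dst, protocol, tunnelid, DPI), then a
 * filter within it by (src, SPI), falling back to the wildcard source slot.
 * A match with a non-zero tunnelhdr re-runs the whole lookup on the inner
 * header, with the tunnel ID becoming part of the session key.
 */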
static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct rsvp_head *head = rcu_dereference_bh(tp->root);
	struct rsvp_session *s;
	struct rsvp_filter *f;
	unsigned int h1, h2;
	__be32 *dst, *src;
	u8 protocol;
	u8 tunnelid = 0;
	u8 *xprt;
#if RSVP_DST_LEN == 4
	struct ipv6hdr *nhptr;

	if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
		return -1;
	nhptr = ipv6_hdr(skb);
#else
	struct iphdr *nhptr;

	if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
		return -1;
	nhptr = ip_hdr(skb);
#endif
	if (unlikely(!head))
		return -1;
restart:

#if RSVP_DST_LEN == 4
	src = &nhptr->saddr.s6_addr32[0];
	dst = &nhptr->daddr.s6_addr32[0];
	protocol = nhptr->nexthdr;
	xprt = ((u8 *)nhptr) + sizeof(struct ipv6hdr);
#else
	src = &nhptr->saddr;
	dst = &nhptr->daddr;
	protocol = nhptr->protocol;
	xprt = ((u8 *)nhptr) + (nhptr->ihl<<2);
	if (ip_is_fragment(nhptr))
		return -1;
#endif

	h1 = hash_dst(dst, protocol, tunnelid);
	h2 = hash_src(src);

	for (s = rcu_dereference_bh(head->ht[h1]); s;
	     s = rcu_dereference_bh(s->next)) {
		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] &&
		    protocol == s->protocol &&
		    !(s->dpi.mask &
		      (*(u32 *)(xprt + s->dpi.offset) ^ s->dpi.key)) &&
#if RSVP_DST_LEN == 4
		    dst[0] == s->dst[0] &&
		    dst[1] == s->dst[1] &&
		    dst[2] == s->dst[2] &&
#endif
		    tunnelid == s->tunnelid) {

			for (f = rcu_dereference_bh(s->ht[h2]); f;
			     f = rcu_dereference_bh(f->next)) {
				if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] &&
				    !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key))
#if RSVP_DST_LEN == 4
				    &&
				    src[0] == f->src[0] &&
				    src[1] == f->src[1] &&
				    src[2] == f->src[2]
#endif
				    ) {
					*res = f->res;
					RSVP_APPLY_RESULT();

matched:
					if (f->tunnelhdr == 0)
						return 0;

					tunnelid = f->res.classid;
					nhptr = (void *)(xprt + f->tunnelhdr - sizeof(*nhptr));
					goto restart;
				}
			}

			/* And wildcard bucket... */
			for (f = rcu_dereference_bh(s->ht[16]); f;
			     f = rcu_dereference_bh(f->next)) {
				*res = f->res;
				RSVP_APPLY_RESULT();
				goto matched;
			}
			return -1;
		}
	}
	return -1;
}

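/* Swap a new copy of a filter in place of the old one with the same handle.
 * Only called for filters obtained via rsvp_get(), so the node must exist;
 * hitting the BUG_ON() below means the tables are corrupted.
 */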
static void rsvp_replace(struct tcf_proto *tp, struct rsvp_filter *n, u32 h)
{
	struct rsvp_head *head = rtnl_dereference(tp->root);
	struct rsvp_session *s;
	struct rsvp_filter __rcu **ins;
	struct rsvp_filter *pins;
	unsigned int h1 = h & 0xFF;
	unsigned int h2 = (h >> 8) & 0xFF;

	for (s = rtnl_dereference(head->ht[h1]); s;
	     s = rtnl_dereference(s->next)) {
		for (ins = &s->ht[h2], pins = rtnl_dereference(*ins); ;
		     ins = &pins->next, pins = rtnl_dereference(*ins)) {
			if (pins->handle == h) {
				RCU_INIT_POINTER(n->next, pins->next);
				rcu_assign_pointer(*ins, n);
				return;
			}
		}
	}

	/* Something went wrong if we are trying to replace a non-existent
	 * node.  Might as well halt instead of silently failing.
	 */
	BUG_ON(1);
}

static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle)
{
	struct rsvp_head *head = rtnl_dereference(tp->root);
	struct rsvp_session *s;
	struct rsvp_filter *f;
	unsigned int h1 = handle & 0xFF;
	unsigned int h2 = (handle >> 8) & 0xFF;

	if (h2 > 16)
		return 0;

	for (s = rtnl_dereference(head->ht[h1]); s;
	     s = rtnl_dereference(s->next)) {
		for (f = rtnl_dereference(s->ht[h2]); f;
		     f = rtnl_dereference(f->next)) {
			if (f->handle == handle)
				return (unsigned long)f;
		}
	}
	return 0;
}

static int rsvp_init(struct tcf_proto *tp)
{
	struct rsvp_head *data;

	data = kzalloc(sizeof(struct rsvp_head), GFP_KERNEL);
	if (data) {
		rcu_assign_pointer(tp->root, data);
		return 0;
	}
	return -ENOBUFS;
}

static void rsvp_delete_filter_rcu(struct rcu_head *head)
{
	struct rsvp_filter *f = container_of(head, struct rsvp_filter, rcu);

	tcf_exts_destroy(&f->exts);
	kfree(f);
}

static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	/* all classifiers are required to call tcf_exts_destroy() after an RCU
	 * grace period, since converted-to-RCU actions rely on that in their
	 * cleanup() callback
	 */
	call_rcu(&f->rcu, rsvp_delete_filter_rcu);
}

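/* Tear down the whole classifier.  Unless forced, refuse while any session
 * is still present; otherwise unlink every session and filter and defer the
 * actual frees to RCU.
 */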
static bool rsvp_destroy(struct tcf_proto *tp, bool force)
{
	struct rsvp_head *data = rtnl_dereference(tp->root);
	int h1, h2;

	if (data == NULL)
		return true;

	if (!force) {
		for (h1 = 0; h1 < 256; h1++) {
			if (rcu_access_pointer(data->ht[h1]))
				return false;
		}
	}

	RCU_INIT_POINTER(tp->root, NULL);

	for (h1 = 0; h1 < 256; h1++) {
		struct rsvp_session *s;

		while ((s = rtnl_dereference(data->ht[h1])) != NULL) {
			RCU_INIT_POINTER(data->ht[h1], s->next);

			for (h2 = 0; h2 <= 16; h2++) {
				struct rsvp_filter *f;

				while ((f = rtnl_dereference(s->ht[h2])) != NULL) {
					rcu_assign_pointer(s->ht[h2], f->next);
					rsvp_delete_filter(tp, f);
				}
			}
			kfree_rcu(s, rcu);
		}
	}
	kfree_rcu(data, rcu);
	return true;
}

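/* Unlink a single filter from its session.  If that leaves the session with
 * no filters in any slot, the session itself is unlinked and freed as well.
 */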
static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct rsvp_head *head = rtnl_dereference(tp->root);
	struct rsvp_filter *nfp, *f = (struct rsvp_filter *)arg;
	struct rsvp_filter __rcu **fp;
	unsigned int h = f->handle;
	struct rsvp_session __rcu **sp;
	struct rsvp_session *nsp, *s = f->sess;
	int i;

	fp = &s->ht[(h >> 8) & 0xFF];
	for (nfp = rtnl_dereference(*fp); nfp;
	     fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
		if (nfp == f) {
			RCU_INIT_POINTER(*fp, f->next);
			rsvp_delete_filter(tp, f);

			/* Strip tree */

			for (i = 0; i <= 16; i++)
				if (s->ht[i])
					return 0;

			/* OK, session has no flows */
			sp = &head->ht[h & 0xFF];
			for (nsp = rtnl_dereference(*sp); nsp;
			     sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
				if (nsp == s) {
					RCU_INIT_POINTER(*sp, s->next);
					kfree_rcu(s, rcu);
					return 0;
				}
			}

			return 0;
		}
	}
	return 0;
}

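/* Filter handles encode their position: bits 0-7 hold the destination hash
 * (session bucket), bits 8-15 the source slot (0-16), and bits 16-31 a
 * per-classifier generator that makes the handle unique.
 */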
static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt)
{
	struct rsvp_head *data = rtnl_dereference(tp->root);
	int i = 0xFFFF;

	while (i-- > 0) {
		u32 h;

		if ((data->hgenerator += 0x10000) == 0)
			data->hgenerator = 0x10000;
		h = data->hgenerator|salt;
		if (rsvp_get(tp, h) == 0)
			return h;
	}
	return 0;
}

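/* Tunnel IDs (1..255) are tracked in the tmap bitmap.  tunnel_bts() ("bit
 * test and set") marks an ID as used and returns 1 only if it was free.
 */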
static int tunnel_bts(struct rsvp_head *data)
{
	int n = data->tgenerator >> 5;
	u32 b = 1 << (data->tgenerator & 0x1F);

	if (data->tmap[n] & b)
		return 0;
	data->tmap[n] |= b;
	return 1;
}

static void tunnel_recycle(struct rsvp_head *data)
{
	struct rsvp_session __rcu **sht = data->ht;
	u32 tmap[256/32];
	int h1, h2;

	memset(tmap, 0, sizeof(tmap));

	for (h1 = 0; h1 < 256; h1++) {
		struct rsvp_session *s;
		for (s = rtnl_dereference(sht[h1]); s;
		     s = rtnl_dereference(s->next)) {
			for (h2 = 0; h2 <= 16; h2++) {
				struct rsvp_filter *f;

				for (f = rtnl_dereference(s->ht[h2]); f;
				     f = rtnl_dereference(f->next)) {
					if (f->tunnelhdr == 0)
						continue;
					data->tgenerator = f->res.classid;
					tunnel_bts(data);
				}
			}
		}
	}

	memcpy(data->tmap, tmap, sizeof(tmap));
}

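/* Allocate a free tunnel ID.  After one full unsuccessful pass over 1..255
 * the bitmap is recycled and the scan is retried; 0 means none are left.
 */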
static u32 gen_tunnel(struct rsvp_head *data)
{
	int i, k;

	for (k = 0; k < 2; k++) {
		for (i = 255; i > 0; i--) {
			if (++data->tgenerator == 0)
				data->tgenerator = 1;
			if (tunnel_bts(data))
				return data->tgenerator;
		}
		tunnel_recycle(data);
	}
	return 0;
}

static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = {
	[TCA_RSVP_CLASSID]	= { .type = NLA_U32 },
	[TCA_RSVP_DST]		= { .type = NLA_BINARY,
				    .len = RSVP_DST_LEN * sizeof(u32) },
	[TCA_RSVP_SRC]		= { .type = NLA_BINARY,
				    .len = RSVP_DST_LEN * sizeof(u32) },
	[TCA_RSVP_PINFO]	= { .len = sizeof(struct tc_rsvp_pinfo) },
};

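/* Create or update a filter.  An existing handle may only have its classid
 * and extensions changed, which is done on a private copy that is then
 * swapped in via rsvp_replace().  A new filter gets a generated handle, is
 * attached to a matching session (created if necessary), and is inserted so
 * that entries whose SPI mask covers all bits of the new mask stay in front.
 */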
static int rsvp_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle,
		       struct nlattr **tca,
		       unsigned long *arg, bool ovr)
{
	struct rsvp_head *data = rtnl_dereference(tp->root);
	struct rsvp_filter *f, *nfp;
	struct rsvp_filter __rcu **fp;
	struct rsvp_session *nsp, *s;
	struct rsvp_session __rcu **sp;
	struct tc_rsvp_pinfo *pinfo = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_RSVP_MAX + 1];
	struct tcf_exts e;
	unsigned int h1, h2;
	__be32 *dst;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_RSVP_MAX, opt, rsvp_policy);
	if (err < 0)
		return err;

	tcf_exts_init(&e, TCA_RSVP_ACT, TCA_RSVP_POLICE);
	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
	if (err < 0)
		return err;

	f = (struct rsvp_filter *)*arg;
	if (f) {
		/* Node exists: adjust only classid */
		struct rsvp_filter *n;

		if (f->handle != handle && handle)
			goto errout2;

		n = kmemdup(f, sizeof(*f), GFP_KERNEL);
		if (!n) {
			err = -ENOMEM;
			goto errout2;
		}

		tcf_exts_init(&n->exts, TCA_RSVP_ACT, TCA_RSVP_POLICE);

		if (tb[TCA_RSVP_CLASSID]) {
			n->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
			tcf_bind_filter(tp, &n->res, base);
		}

		tcf_exts_change(tp, &n->exts, &e);
		rsvp_replace(tp, n, handle);
		return 0;
	}

	/* Now more serious part... */
	err = -EINVAL;
	if (handle)
		goto errout2;
	if (tb[TCA_RSVP_DST] == NULL)
		goto errout2;

	err = -ENOBUFS;
	f = kzalloc(sizeof(struct rsvp_filter), GFP_KERNEL);
	if (f == NULL)
		goto errout2;

	tcf_exts_init(&f->exts, TCA_RSVP_ACT, TCA_RSVP_POLICE);
	h2 = 16;
	if (tb[TCA_RSVP_SRC]) {
		memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));
		h2 = hash_src(f->src);
	}
	if (tb[TCA_RSVP_PINFO]) {
		pinfo = nla_data(tb[TCA_RSVP_PINFO]);
		f->spi = pinfo->spi;
		f->tunnelhdr = pinfo->tunnelhdr;
	}
	if (tb[TCA_RSVP_CLASSID])
		f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);

	dst = nla_data(tb[TCA_RSVP_DST]);
	h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0);

	err = -ENOMEM;
	if ((f->handle = gen_handle(tp, h1 | (h2<<8))) == 0)
		goto errout;

	if (f->tunnelhdr) {
		err = -EINVAL;
		if (f->res.classid > 255)
			goto errout;

		err = -ENOMEM;
		if (f->res.classid == 0 &&
		    (f->res.classid = gen_tunnel(data)) == 0)
			goto errout;
	}

	for (sp = &data->ht[h1];
	     (s = rtnl_dereference(*sp)) != NULL;
	     sp = &s->next) {
		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
		    pinfo && pinfo->protocol == s->protocol &&
		    memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
#if RSVP_DST_LEN == 4
		    dst[0] == s->dst[0] &&
		    dst[1] == s->dst[1] &&
		    dst[2] == s->dst[2] &&
#endif
		    pinfo->tunnelid == s->tunnelid) {

insert:
			/* OK, we found an appropriate session */

			fp = &s->ht[h2];

			f->sess = s;
			if (f->tunnelhdr == 0)
				tcf_bind_filter(tp, &f->res, base);

			tcf_exts_change(tp, &f->exts, &e);

			fp = &s->ht[h2];
			for (nfp = rtnl_dereference(*fp); nfp;
			     fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
				__u32 mask = nfp->spi.mask & f->spi.mask;

				if (mask != f->spi.mask)
					break;
			}
			RCU_INIT_POINTER(f->next, nfp);
			rcu_assign_pointer(*fp, f);

			*arg = (unsigned long)f;
			return 0;
		}
	}

	/* No session found. Create a new one. */

	err = -ENOBUFS;
	s = kzalloc(sizeof(struct rsvp_session), GFP_KERNEL);
	if (s == NULL)
		goto errout;
	memcpy(s->dst, dst, sizeof(s->dst));

	if (pinfo) {
		s->dpi = pinfo->dpi;
		s->protocol = pinfo->protocol;
		s->tunnelid = pinfo->tunnelid;
	}
	sp = &data->ht[h1];
	for (nsp = rtnl_dereference(*sp); nsp;
	     sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
		if ((nsp->dpi.mask & s->dpi.mask) != s->dpi.mask)
			break;
	}
	RCU_INIT_POINTER(s->next, nsp);
	rcu_assign_pointer(*sp, s);

	goto insert;

errout:
	kfree(f);
errout2:
	tcf_exts_destroy(&e);
	return err;
}

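/* Walk every filter in every session, honouring the walker's skip count and
 * stopping early if the callback asks for it.
 */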
static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct rsvp_head *head = rtnl_dereference(tp->root);
	unsigned int h, h1;

	if (arg->stop)
		return;

	for (h = 0; h < 256; h++) {
		struct rsvp_session *s;

		for (s = rtnl_dereference(head->ht[h]); s;
		     s = rtnl_dereference(s->next)) {
			for (h1 = 0; h1 <= 16; h1++) {
				struct rsvp_filter *f;

				for (f = rtnl_dereference(s->ht[h1]); f;
				     f = rtnl_dereference(f->next)) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}

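/* Dump a filter back to userspace as TCA_RSVP_* attributes.  The source
 * address is omitted for filters living in the wildcard slot (slot 16).
 */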
static int rsvp_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct rsvp_filter *f = (struct rsvp_filter *)fh;
	struct rsvp_session *s;
	struct nlattr *nest;
	struct tc_rsvp_pinfo pinfo;

	if (f == NULL)
		return skb->len;
	s = f->sess;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst))
		goto nla_put_failure;
	pinfo.dpi = s->dpi;
	pinfo.spi = f->spi;
	pinfo.protocol = s->protocol;
	pinfo.tunnelid = s->tunnelid;
	pinfo.tunnelhdr = f->tunnelhdr;
	pinfo.pad = 0;
	if (nla_put(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo))
		goto nla_put_failure;
	if (f->res.classid &&
	    nla_put_u32(skb, TCA_RSVP_CLASSID, f->res.classid))
		goto nla_put_failure;
	if (((f->handle >> 8) & 0xFF) != 16 &&
	    nla_put(skb, TCA_RSVP_SRC, sizeof(f->src), f->src))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops RSVP_OPS __read_mostly = {
	.kind		= RSVP_ID,
	.classify	= rsvp_classify,
	.init		= rsvp_init,
	.destroy	= rsvp_destroy,
	.get		= rsvp_get,
	.change		= rsvp_change,
	.delete		= rsvp_delete,
	.walk		= rsvp_walk,
	.dump		= rsvp_dump,
	.owner		= THIS_MODULE,
};

static int __init init_rsvp(void)
{
	return register_tcf_proto_ops(&RSVP_OPS);
}

static void __exit exit_rsvp(void)
{
	unregister_tcf_proto_ops(&RSVP_OPS);
}

module_init(init_rsvp)
module_exit(exit_rsvp)