/*
 * net/sched/act_api.c	Packet action API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Author:	Jamal Hadi Salim
 *
 *
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/netlink.h>

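/*
 * tcf_hash_destroy - unlink an action instance from its kind's hash table,
 * drop its rate estimator, and free it.  Warns if the entry was not found.
 */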
void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
{
	unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
	struct tcf_common **p1p;

	for (p1p = &hinfo->htab[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
		if (*p1p == p) {
			write_lock_bh(hinfo->lock);
			*p1p = p->tcfc_next;
			write_unlock_bh(hinfo->lock);
			gen_kill_estimator(&p->tcfc_bstats,
					   &p->tcfc_rate_est);
			kfree(p);
			return;
		}
	}
	WARN_ON(1);
}
EXPORT_SYMBOL(tcf_hash_destroy);

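/*
 * tcf_hash_release - drop a reference (and, if @bind, a bind count) on an
 * action instance; destroys it once both counts reach zero.  Returns 1 if
 * the instance was destroyed, 0 otherwise.
 */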
int tcf_hash_release(struct tcf_common *p, int bind,
		     struct tcf_hashinfo *hinfo)
{
	int ret = 0;

	if (p) {
		if (bind)
			p->tcfc_bindcnt--;

		p->tcfc_refcnt--;
		if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) {
			tcf_hash_destroy(p, hinfo);
			ret = 1;
		}
	}
	return ret;
}
EXPORT_SYMBOL(tcf_hash_release);

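/*
 * tcf_dump_walker - walk one action kind's hash table and dump up to
 * TCA_ACT_MAX_PRIO instances into @skb, resuming from cb->args[0].
 */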
static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
			   struct tc_action *a, struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p;
	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
	struct nlattr *nest;

	read_lock_bh(hinfo->lock);

	s_i = cb->args[0];

	for (i = 0; i < (hinfo->hmask + 1); i++) {
		p = hinfo->htab[tcf_hash(i, hinfo->hmask)];

		for (; p; p = p->tcfc_next) {
			index++;
			if (index < s_i)
				continue;
			a->priv = p;
			a->order = n_i;

			nest = nla_nest_start(skb, a->order);
			if (nest == NULL)
				goto nla_put_failure;
			err = tcf_action_dump_1(skb, a, 0, 0);
			if (err < 0) {
				index--;
				nlmsg_trim(skb, nest);
				goto done;
			}
			nla_nest_end(skb, nest);
			n_i++;
			if (n_i >= TCA_ACT_MAX_PRIO)
				goto done;
		}
	}
done:
	read_unlock_bh(hinfo->lock);
	if (n_i)
		cb->args[0] += n_i;
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}

static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
			  struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p, *s_p;
	struct nlattr *nest;
	int i = 0, n_i = 0;

	nest = nla_nest_start(skb, a->order);
	if (nest == NULL)
		goto nla_put_failure;
	NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind);
	for (i = 0; i < (hinfo->hmask + 1); i++) {
		p = hinfo->htab[tcf_hash(i, hinfo->hmask)];

		while (p != NULL) {
			s_p = p->tcfc_next;
			if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo))
				module_put(a->ops->owner);
			n_i++;
			p = s_p;
		}
	}
	NLA_PUT_U32(skb, TCA_FCNT, n_i);
	nla_nest_end(skb, nest);

	return n_i;
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EINVAL;
}

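/*
 * tcf_generic_walker - dispatch an RTM_DELACTION flush or RTM_GETACTION dump
 * over the hash table registered in the action's ops.
 */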
int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
		       int type, struct tc_action *a)
{
	struct tcf_hashinfo *hinfo = a->ops->hinfo;

	if (type == RTM_DELACTION) {
		return tcf_del_walker(skb, a, hinfo);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(skb, cb, a, hinfo);
	} else {
		printk("tcf_generic_walker: unknown action %d\n", type);
		return -EINVAL;
	}
}
EXPORT_SYMBOL(tcf_generic_walker);

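/* tcf_hash_lookup - find an action instance by index under the read lock */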
struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p;

	read_lock_bh(hinfo->lock);
	for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
	     p = p->tcfc_next) {
		if (p->tcfc_index == index)
			break;
	}
	read_unlock_bh(hinfo->lock);

	return p;
}
EXPORT_SYMBOL(tcf_hash_lookup);

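/*
 * tcf_hash_new_index - allocate the next unused, non-zero action index,
 * updating the per-kind index generator.
 */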
u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo)
{
	u32 val = *idx_gen;

	do {
		if (++val == 0)
			val = 1;
	} while (tcf_hash_lookup(val, hinfo));

	return (*idx_gen = val);
}
EXPORT_SYMBOL(tcf_hash_new_index);

int tcf_hash_search(struct tc_action *a, u32 index)
{
	struct tcf_hashinfo *hinfo = a->ops->hinfo;
	struct tcf_common *p = tcf_hash_lookup(index, hinfo);

	if (p) {
		a->priv = p;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(tcf_hash_search);

struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind,
				  struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p = NULL;

	if (index && (p = tcf_hash_lookup(index, hinfo)) != NULL) {
		if (bind)
			p->tcfc_bindcnt++;
		p->tcfc_refcnt++;
		a->priv = p;
	}
	return p;
}
EXPORT_SYMBOL(tcf_hash_check);

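/*
 * tcf_hash_create - allocate and initialise a new action instance, optionally
 * attaching a rate estimator described by @est.  The caller still has to add
 * it to the hash table with tcf_hash_insert().
 */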
struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est,
				   struct tc_action *a, int size, int bind,
				   u32 *idx_gen, struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p = kzalloc(size, GFP_KERNEL);

	if (unlikely(!p))
		return ERR_PTR(-ENOMEM);
	p->tcfc_refcnt = 1;
	if (bind)
		p->tcfc_bindcnt = 1;

	spin_lock_init(&p->tcfc_lock);
	p->tcfc_index = index ? index : tcf_hash_new_index(idx_gen, hinfo);
	p->tcfc_tm.install = jiffies;
	p->tcfc_tm.lastuse = jiffies;
	if (est) {
		int err = gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est,
					    &p->tcfc_lock, est);
		if (err) {
			kfree(p);
			return ERR_PTR(err);
		}
	}

	a->priv = (void *) p;
	return p;
}
EXPORT_SYMBOL(tcf_hash_create);

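/* tcf_hash_insert - add a newly created instance to the head of its bucket */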
void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo)
{
	unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);

	write_lock_bh(hinfo->lock);
	p->tcfc_next = hinfo->htab[h];
	hinfo->htab[h] = p;
	write_unlock_bh(hinfo->lock);
}
EXPORT_SYMBOL(tcf_hash_insert);

static struct tc_action_ops *act_base = NULL;
static DEFINE_RWLOCK(act_mod_lock);

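/*
 * tcf_register_action / tcf_unregister_action - maintain the global list of
 * action kinds (act_base), rejecting duplicate types or kind names.
 */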
int tcf_register_action(struct tc_action_ops *act)
{
	struct tc_action_ops *a, **ap;

	write_lock(&act_mod_lock);
	for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) {
		if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
			write_unlock(&act_mod_lock);
			return -EEXIST;
		}
	}
	act->next = NULL;
	*ap = act;
	write_unlock(&act_mod_lock);
	return 0;
}
EXPORT_SYMBOL(tcf_register_action);

int tcf_unregister_action(struct tc_action_ops *act)
{
	struct tc_action_ops *a, **ap;
	int err = -ENOENT;

	write_lock(&act_mod_lock);
	for (ap = &act_base; (a = *ap) != NULL; ap = &a->next)
		if (a == act)
			break;
	if (a) {
		*ap = a->next;
		a->next = NULL;
		err = 0;
	}
	write_unlock(&act_mod_lock);
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);

/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
	struct tc_action_ops *a = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		for (a = act_base; a; a = a->next) {
			if (strcmp(kind, a->kind) == 0) {
				if (!try_module_get(a->owner)) {
					read_unlock(&act_mod_lock);
					return NULL;
				}
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return a;
}

/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
	struct tc_action_ops *a = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		for (a = act_base; a; a = a->next) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (!try_module_get(a->owner)) {
					read_unlock(&act_mod_lock);
					return NULL;
				}
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return a;
}

#if 0
/* lookup by id */
static struct tc_action_ops *tc_lookup_action_id(u32 type)
{
	struct tc_action_ops *a = NULL;

	if (type) {
		read_lock(&act_mod_lock);
		for (a = act_base; a; a = a->next) {
			if (a->type == type) {
				if (!try_module_get(a->owner)) {
					read_unlock(&act_mod_lock);
					return NULL;
				}
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return a;
}
#endif

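/*
 * tcf_action_exec - run a chain of actions on @skb.  TC_ACT_PIPE continues to
 * the next action, TC_ACT_REPEAT re-runs the current one, and any other
 * verdict terminates the chain and is returned to the caller.
 */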
int tcf_action_exec(struct sk_buff *skb, struct tc_action *act,
		    struct tcf_result *res)
{
	struct tc_action *a;
	int ret = -1;

	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		ret = TC_ACT_OK;
		goto exec_done;
	}
	while ((a = act) != NULL) {
repeat:
		if (a->ops && a->ops->act) {
			ret = a->ops->act(skb, a, res);
			if (TC_MUNGED & skb->tc_verd) {
				/* copied already, allow trampling */
				skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
				skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd);
			}
			if (ret == TC_ACT_REPEAT)
				goto repeat;	/* we need a ttl - JHS */
			if (ret != TC_ACT_PIPE)
				goto exec_done;
		}
		act = a->next;
	}
exec_done:
	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);

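/*
 * tcf_action_destroy - release every action in a chain via its cleanup op,
 * dropping the owning module reference when an instance is deleted.
 */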
void tcf_action_destroy(struct tc_action *act, int bind)
{
	struct tc_action *a;

	for (a = act; a; a = act) {
		if (a->ops && a->ops->cleanup) {
			if (a->ops->cleanup(a, bind) == ACT_P_DELETED)
				module_put(a->ops->owner);
			act = act->next;
			kfree(a);
		} else { /* FIXME: Remove later - catch insertion bugs */
			printk("tcf_action_destroy: BUG? destroying NULL ops\n");
			act = act->next;
			kfree(a);
		}
	}
}

int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;

	if (a->ops == NULL || a->ops->dump == NULL)
		return err;
	return a->ops->dump(skb, a, bind, ref);
}

int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	if (a->ops == NULL || a->ops->dump == NULL)
		return err;

	NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind);
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if ((err = tcf_action_dump_old(skb, a, bind, ref)) > 0) {
		nla_nest_end(skb, nest);
		return err;
	}

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);

int
tcf_action_dump(struct sk_buff *skb, struct tc_action *act, int bind, int ref)
{
	struct tc_action *a;
	int err = -EINVAL;
	struct nlattr *nest;

	while ((a = act) != NULL) {
		act = a->next;
		nest = nla_nest_start(skb, a->order);
		if (nest == NULL)
			goto nla_put_failure;
		err = tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}

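/*
 * tcf_action_init_1 - create or bind a single action from its netlink
 * attributes, auto-loading the act_<kind> module if necessary (which forces
 * an -EAGAIN replay because the RTNL lock is dropped meanwhile).
 */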
struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
				    char *name, int ovr, int bind)
{
	struct tc_action *a;
	struct tc_action_ops *a_o;
	char act_name[IFNAMSIZ];
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct nlattr *kind;
	int err;

	if (name == NULL) {
		err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
		if (err < 0)
			goto err_out;
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (kind == NULL)
			goto err_out;
		if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
	} else {
		err = -EINVAL;
		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_MODULES
		rtnl_unlock();
		request_module("act_%s", act_name);
		rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load. So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request. We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			err = -EAGAIN;
			goto err_mod;
		}
#endif
		err = -ENOENT;
		goto err_out;
	}

	err = -ENOMEM;
	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (a == NULL)
		goto err_mod;

	/* backward compatibility for policer */
	if (name == NULL)
		err = a_o->init(tb[TCA_ACT_OPTIONS], est, a, ovr, bind);
	else
		err = a_o->init(nla, est, a, ovr, bind);
	if (err < 0)
		goto err_free;

	/* The module count goes up only when a brand new policy is created;
	 * if it already exists and is only bound to in a_o->init(), then
	 * ACT_P_CREATED is not returned (a zero is).
	 */
	if (err != ACT_P_CREATED)
		module_put(a_o->owner);
	a->ops = a_o;

	return a;

err_free:
	kfree(a);
err_mod:
	module_put(a_o->owner);
err_out:
	return ERR_PTR(err);
}

struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est,
				  char *name, int ovr, int bind)
{
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *head = NULL, *act, *act_prev = NULL;
	int err;
	int i;

	err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
	if (err < 0)
		return ERR_PTR(err);

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(tb[i], est, name, ovr, bind);
		if (IS_ERR(act))
			goto err;
		act->order = i;

		if (head == NULL)
			head = act;
		else
			act_prev->next = act;
		act_prev = act;
	}
	return head;

err:
	if (head != NULL)
		tcf_action_destroy(head, bind);
	return act;
}

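/*
 * tcf_action_copy_stats - dump basic, rate-estimator and queue statistics for
 * an action, using the old compat TLV layout when @compat_mode is set.
 */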
int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
			  int compat_mode)
{
	int err = 0;
	struct gnet_dump d;
	struct tcf_act_hdr *h = a->priv;

	if (h == NULL)
		goto errout;

	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (compat_mode) {
		if (a->type == TCA_OLD_COMPAT)
			err = gnet_stats_start_copy_compat(skb, 0,
				TCA_STATS, TCA_XSTATS, &h->tcf_lock, &d);
		else
			return 0;
	} else
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &h->tcf_lock, &d);

	if (err < 0)
		goto errout;

	if (a->ops != NULL && a->ops->get_stats != NULL)
		if (a->ops->get_stats(skb, a) < 0)
			goto errout;

	if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &h->tcf_bstats,
				     &h->tcf_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}

static int
tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
	     u16 flags, int event, int bind, int ref)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);

	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_action_dump(skb, a, bind, ref) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nla_put_failure:
nlmsg_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int
act_get_notify(u32 pid, struct nlmsghdr *n, struct tc_action *a, int event)
{
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	if (tca_get_fill(skb, a, pid, n->nlmsg_seq, 0, event, 0, 0) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnl_unicast(skb, &init_net, pid);
}

static struct tc_action *
tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct tc_action *a;
	int index;
	int err;

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index))
		goto err_out;
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	err = -ENOMEM;
	a = kzalloc(sizeof(struct tc_action), GFP_KERNEL);
	if (a == NULL)
		goto err_out;

	err = -EINVAL;
	a->ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (a->ops == NULL)
		goto err_free;
	if (a->ops->lookup == NULL)
		goto err_mod;
	err = -ENOENT;
	if (a->ops->lookup(a, index) == 0)
		goto err_mod;

	module_put(a->ops->owner);
	return a;

err_mod:
	module_put(a->ops->owner);
err_free:
	kfree(a);
err_out:
	return ERR_PTR(err);
}

static void cleanup_a(struct tc_action *act)
{
	struct tc_action *a;

	for (a = act; a; a = act) {
		act = a->next;
		kfree(a);
	}
}

static struct tc_action *create_a(int i)
{
	struct tc_action *act;

	act = kzalloc(sizeof(*act), GFP_KERNEL);
	if (act == NULL) {
		printk("create_a: failed to alloc!\n");
		return NULL;
	}
	act->order = i;
	return act;
}

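/*
 * tca_action_flush - handle RTM_DELACTION with NLM_F_ROOT: delete every
 * instance of one action kind and notify RTNLGRP_TC listeners.
 */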
static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct nlattr *kind;
	struct tc_action *a = create_a(0);
	int err = -ENOMEM;

	if (a == NULL) {
		printk("tca_action_flush: couldn't create tc_action\n");
		return err;
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		printk("tca_action_flush: failed skb alloc\n");
		kfree(a);
		return err;
	}

	b = skb_tail_pointer(skb);

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	a->ops = tc_lookup_action(kind);
	if (a->ops == NULL)
		goto err_out;

	nlh = NLMSG_PUT(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t));
	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto nla_put_failure;

	err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
	if (err < 0)
		goto nla_put_failure;
	if (err == 0)
		goto noflush_out;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(a->ops->owner);
	kfree(a);
	err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		return 0;

	return err;

nla_put_failure:
nlmsg_failure:
	module_put(a->ops->owner);
err_out:
noflush_out:
	kfree_skb(skb);
	kfree(a);
	return err;
}

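/*
 * tca_action_gd - common handler for RTM_GETACTION and RTM_DELACTION on a
 * list of specific action instances; a flush request is diverted to
 * tca_action_flush() above.
 */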
static int
tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *head = NULL, *act, *act_prev = NULL;

	ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
		if (tb[1] != NULL)
			return tca_action_flush(tb[1], n, pid);
		else
			return -EINVAL;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(tb[i], n, pid);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		act->order = i;

		if (head == NULL)
			head = act;
		else
			act_prev->next = act;
		act_prev = act;
	}

	if (event == RTM_GETACTION)
		ret = act_get_notify(pid, n, head, event);
	else { /* delete */
		struct sk_buff *skb;

		skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
		if (!skb) {
			ret = -ENOBUFS;
			goto err;
		}

		if (tca_get_fill(skb, head, pid, n->nlmsg_seq, 0, event,
				 0, 1) <= 0) {
			kfree_skb(skb);
			ret = -EINVAL;
			goto err;
		}

		/* now do the delete */
		tcf_action_destroy(head, 0);
		ret = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
		if (ret > 0)
			return 0;
		return ret;
	}
err:
	cleanup_a(head);
	return ret;
}

static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event,
			  u16 flags)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	struct nlattr *nest;
	unsigned char *b;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	b = skb_tail_pointer(skb);

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);
	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_action_dump(skb, a, 0, 0) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	NETLINK_CB(skb).dst_group = RTNLGRP_TC;

	err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, flags & NLM_F_ECHO);
	if (err > 0)
		err = 0;
	return err;

nla_put_failure:
nlmsg_failure:
	kfree_skb(skb);
	return -1;
}

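/*
 * tcf_action_add - handler for RTM_NEWACTION: instantiate the requested
 * actions and send the RTM_NEWACTION notification.
 */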
static int
tcf_action_add(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int ovr)
{
	int ret = 0;
	struct tc_action *act;
	struct tc_action *a;
	u32 seq = n->nlmsg_seq;

	act = tcf_action_init(nla, NULL, NULL, ovr, 0);
	if (act == NULL)
		goto done;
	if (IS_ERR(act)) {
		ret = PTR_ERR(act);
		goto done;
	}

	/* dump then free all the actions after update; inserted policy
	 * stays intact
	 */
	ret = tcf_add_notify(act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
	for (a = act; a; a = act) {
		act = a->next;
		kfree(a);
	}
done:
	return ret;
}

static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ACT_MAX + 1];
	u32 pid = skb ? NETLINK_CB(skb).pid : 0;
	int ret = 0, ovr = 0;

	if (!net_eq(net, &init_net))
		return -EINVAL;

	ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		printk("tc_ctl_action: received NO action attribs\n");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* we are going to assume all other flags
		 * imply create only if it doesn't exist.
		 * Note that CREATE | EXCL implies that,
		 * but since we want to avoid ambiguity (eg when flags
		 * is zero) then just set this
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			ovr = 1;
replay:
		ret = tcf_action_add(tca[TCA_ACT_TAB], n, pid, ovr);
		if (ret == -EAGAIN)
			goto replay;
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(tca[TCA_ACT_TAB], n, pid, RTM_DELACTION);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(tca[TCA_ACT_TAB], n, pid, RTM_GETACTION);
		break;
	default:
		BUG();
	}

	return ret;
}

static struct nlattr *
find_dump_kind(const struct nlmsghdr *n)
{
	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct nlattr *nla[TCAA_MAX + 1];
	struct nlattr *kind;

	if (nlmsg_parse(n, sizeof(struct tcamsg), nla, TCAA_MAX, NULL) < 0)
		return NULL;
	tb1 = nla[TCA_ACT_TAB];
	if (tb1 == NULL)
		return NULL;

	if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
		      NLMSG_ALIGN(nla_len(tb1)), NULL) < 0)
		return NULL;

	if (tb[1] == NULL)
		return NULL;
	if (nla_parse(tb2, TCA_ACT_MAX, nla_data(tb[1]),
		      nla_len(tb[1]), NULL) < 0)
		return NULL;
	kind = tb2[TCA_ACT_KIND];

	return kind;
}

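/*
 * tc_dump_action - netlink dump callback: walk the table of the action kind
 * named in the request and stream its instances back to user space.
 */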
static int
tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	struct tc_action a;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh);
	struct nlattr *kind = find_dump_kind(cb->nlh);

	if (!net_eq(net, &init_net))
		return 0;

	if (kind == NULL) {
		printk("tc_dump_action: action bad kind\n");
		return 0;
	}

	a_o = tc_lookup_action(kind);
	if (a_o == NULL)
		return 0;

	memset(&a, 0, sizeof(struct tc_action));
	a.ops = a_o;

	if (a_o->walk == NULL) {
		printk("tc_dump_action: %s !capable of dumping table\n",
		       a_o->kind);
		goto nla_put_failure;
	}

	nlh = NLMSG_PUT(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t));
	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto nla_put_failure;

	ret = a_o->walk(skb, cb, RTM_GETACTION, &a);
	if (ret < 0)
		goto nla_put_failure;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
	} else
		nla_nest_cancel(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).pid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

nla_put_failure:
nlmsg_failure:
	module_put(a_o->owner);
	nlmsg_trim(skb, b);
	return skb->len;
}

static int __init tc_action_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action);

	return 0;
}

subsys_initcall(tc_action_init);