drivers: power: report battery voltage in AOSP compatible format
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / sched / cls_api.c
1 /*
2 * net/sched/cls_api.c Packet classifier API.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
11 * Changes:
12 *
13 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
14 *
15 */
16
17 #include <linux/module.h>
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/string.h>
21 #include <linux/errno.h>
22 #include <linux/skbuff.h>
23 #include <linux/init.h>
24 #include <linux/kmod.h>
25 #include <linux/err.h>
26 #include <linux/slab.h>
27 #include <net/net_namespace.h>
28 #include <net/sock.h>
29 #include <net/netlink.h>
30 #include <net/pkt_sched.h>
31 #include <net/pkt_cls.h>
32
33 /* The list of all installed classifier types */
34
35 static struct tcf_proto_ops *tcf_proto_base __read_mostly;
36
37 /* Protects list of registered TC modules. It is pure SMP lock. */
38 static DEFINE_RWLOCK(cls_mod_lock);
39
40 /* Find classifier type by string name */
41
42 static const struct tcf_proto_ops *tcf_proto_lookup_ops(struct nlattr *kind)
43 {
44 const struct tcf_proto_ops *t = NULL;
45
46 if (kind) {
47 read_lock(&cls_mod_lock);
48 for (t = tcf_proto_base; t; t = t->next) {
49 if (nla_strcmp(kind, t->kind) == 0) {
50 if (!try_module_get(t->owner))
51 t = NULL;
52 break;
53 }
54 }
55 read_unlock(&cls_mod_lock);
56 }
57 return t;
58 }
59
60 /* Register(unregister) new classifier type */
61
62 int register_tcf_proto_ops(struct tcf_proto_ops *ops)
63 {
64 struct tcf_proto_ops *t, **tp;
65 int rc = -EEXIST;
66
67 write_lock(&cls_mod_lock);
68 for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next)
69 if (!strcmp(ops->kind, t->kind))
70 goto out;
71
72 ops->next = NULL;
73 *tp = ops;
74 rc = 0;
75 out:
76 write_unlock(&cls_mod_lock);
77 return rc;
78 }
79 EXPORT_SYMBOL(register_tcf_proto_ops);
80
81 int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
82 {
83 struct tcf_proto_ops *t, **tp;
84 int rc = -ENOENT;
85
86 write_lock(&cls_mod_lock);
87 for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next)
88 if (t == ops)
89 break;
90
91 if (!t)
92 goto out;
93 *tp = t->next;
94 rc = 0;
95 out:
96 write_unlock(&cls_mod_lock);
97 return rc;
98 }
99 EXPORT_SYMBOL(unregister_tcf_proto_ops);
100
101 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
102 struct nlmsghdr *n, struct tcf_proto *tp,
103 unsigned long fh, int event);
104
105
106 /* Select new prio value from the range, managed by kernel. */
107
108 static inline u32 tcf_auto_prio(struct tcf_proto *tp)
109 {
110 u32 first = TC_H_MAKE(0xC0000000U, 0U);
111
112 if (tp)
113 first = tp->prio - 1;
114
115 return first;
116 }
117
118 /* Add/change/delete/get a filter node */
119
/* tc_ctl_tfilter - handle RTM_{NEW,DEL,GET}TFILTER rtnetlink requests.
 *
 * Locates (or, for NEWTFILTER with NLM_F_CREATE, creates) the tcf_proto
 * instance identified by device / qdisc / class / priority / protocol,
 * then dispatches the per-filter operation to the classifier's ops.
 * Returns 0 on success or a negative errno.  Runs under RTNL; chain
 * insertions and removals additionally take the qdisc root lock.
 */
static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	spinlock_t *root_lock;
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 nprio;	/* priority exactly as the user supplied it (may be 0) */
	u32 parent;
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_proto **back, **chain;
	struct tcf_proto *tp;
	const struct tcf_proto_ops *tp_ops;
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	unsigned long fh;
	int err;
	int tp_created;

	/* Only GET is allowed without CAP_NET_ADMIN. */
	if ((n->nlmsg_type != RTM_GETTFILTER) && !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

replay:
	/* Everything below may be re-run after -EAGAIN (module autoload
	 * drops RTNL), so all request state is re-derived here.
	 */
	tp_created = 0;

	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	nprio = prio;
	parent = t->tcm_parent;
	cl = 0;

	if (prio == 0) {
		/* If no priority is given, user wants we allocated it. */
		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			return -ENOENT;
		prio = TC_H_MAKE(0x80000000U, 0U);
	}

	/* Find head of filter chain. */

	/* Find link */
	dev = __dev_get_by_index(net, t->tcm_ifindex);
	if (dev == NULL)
		return -ENODEV;

	/* Find qdisc */
	if (!parent) {
		q = dev->qdisc;
		parent = q->handle;
	} else {
		q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
		if (q == NULL)
			return -EINVAL;
	}

	/* Is it classful? */
	cops = q->ops->cl_ops;
	if (!cops)
		return -EINVAL;

	if (cops->tcf_chain == NULL)
		return -EOPNOTSUPP;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		cl = cops->get(q, parent);
		if (cl == 0)
			return -ENOENT;
	}

	/* And the last stroke */
	chain = cops->tcf_chain(q, cl);
	err = -EINVAL;
	if (chain == NULL)
		goto errout;

	/* Check the chain for existence of proto-tcf with this priority */
	for (back = chain; (tp = *back) != NULL; back = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				/* Same priority: protocol must match unless
				 * it is wildcarded, and an auto-allocated
				 * prio (nprio == 0) must not collide.
				 */
				if (!nprio ||
				    (tp->protocol != protocol && protocol))
					goto errout;
			} else
				tp = NULL;	/* insertion point found */
			break;
		}
	}

	root_lock = qdisc_root_sleeping_lock(q);

	if (tp == NULL) {
		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol)
			goto errout;

		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto errout;


		/* Create new proto tcf */

		err = -ENOBUFS;
		tp = kzalloc(sizeof(*tp), GFP_KERNEL);
		if (tp == NULL)
			goto errout;
		err = -ENOENT;
		tp_ops = tcf_proto_lookup_ops(tca[TCA_KIND]);
		if (tp_ops == NULL) {
#ifdef CONFIG_MODULES
			struct nlattr *kind = tca[TCA_KIND];
			char name[IFNAMSIZ];

			if (kind != NULL &&
			    nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
				rtnl_unlock();
				request_module("cls_%s", name);
				rtnl_lock();
				tp_ops = tcf_proto_lookup_ops(kind);
				/* We dropped the RTNL semaphore in order to
				 * perform the module load. So, even if we
				 * succeeded in loading the module we have to
				 * replay the request. We indicate this using
				 * -EAGAIN.
				 */
				if (tp_ops != NULL) {
					module_put(tp_ops->owner);
					err = -EAGAIN;
				}
			}
#endif
			kfree(tp);
			goto errout;
		}
		tp->ops = tp_ops;
		tp->protocol = protocol;
		/* Use the user's priority, or pick one just above the
		 * current chain head (*back is where we would insert).
		 */
		tp->prio = nprio ? : TC_H_MAJ(tcf_auto_prio(*back));
		tp->q = q;
		tp->classify = tp_ops->classify;
		tp->classid = parent;

		err = tp_ops->init(tp);
		if (err != 0) {
			module_put(tp_ops->owner);
			kfree(tp);
			goto errout;
		}

		/* Not linked into the chain yet; that happens only after
		 * ->change() succeeds, under the root lock.
		 */
		tp_created = 1;

	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind))
		goto errout;

	fh = tp->ops->get(tp, t->tcm_handle);

	if (fh == 0) {
		if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
			/* Delete the whole proto-tcf: unlink under the root
			 * lock, notify, then destroy.
			 */
			spin_lock_bh(root_lock);
			*back = tp->next;
			spin_unlock_bh(root_lock);

			tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
			tcf_destroy(tp);
			err = 0;
			goto errout;
		}

		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto errout;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTFILTER:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL) {
				if (tp_created)
					tcf_destroy(tp);
				goto errout;
			}
			break;
		case RTM_DELTFILTER:
			err = tp->ops->delete(tp, fh);
			if (err == 0)
				tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
			goto errout;
		case RTM_GETTFILTER:
			err = tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
			goto errout;
		default:
			err = -EINVAL;
			goto errout;
		}
	}

	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh);
	if (err == 0) {
		if (tp_created) {
			/* Success: link the new proto-tcf into the chain at
			 * the insertion point found above.
			 */
			spin_lock_bh(root_lock);
			tp->next = *back;
			*back = tp;
			spin_unlock_bh(root_lock);
		}
		tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
	} else {
		if (tp_created)
			tcf_destroy(tp);
	}

errout:
	if (cl)
		cops->put(q, cl);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;
}
348
/* tcf_fill_node - encode one filter (tp, fh) as a TC netlink message.
 *
 * Appends a full nlmsghdr + tcmsg + attributes for the filter to @skb.
 * For any event other than RTM_DELTFILTER the classifier's ->dump() is
 * invoked to add its own attributes (and to set tcm_handle).
 *
 * Returns skb->len on success; -1 on failure, in which case the
 * partially written message is trimmed so @skb is left unchanged.
 */
static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp,
			 unsigned long fh, u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);	/* rollback point */

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex;
	tcm->tcm_parent = tp->classid;
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	tcm->tcm_handle = fh;
	if (RTM_DELTFILTER != event) {
		/* Let the classifier dump its attributes; it overwrites
		 * tcm_handle with the filter's real handle.
		 */
		tcm->tcm_handle = 0;
		if (tp->ops->dump && tp->ops->dump(tp, fh, skb, tcm) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
382
383 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
384 struct nlmsghdr *n, struct tcf_proto *tp,
385 unsigned long fh, int event)
386 {
387 struct sk_buff *skb;
388 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
389
390 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
391 if (!skb)
392 return -ENOBUFS;
393
394 if (tcf_fill_node(skb, tp, fh, portid, n->nlmsg_seq, 0, event) <= 0) {
395 kfree_skb(skb);
396 return -EINVAL;
397 }
398
399 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
400 n->nlmsg_flags & NLM_F_ECHO);
401 }
402
/* Per-dump context handed to classifier ->walk() via the embedded
 * tcf_walker (must be first so the callback can cast back).
 */
struct tcf_dump_args {
	struct tcf_walker w;		/* walker state; w.fn = tcf_node_dump */
	struct sk_buff *skb;		/* dump skb being filled */
	struct netlink_callback *cb;	/* netlink dump callback (seq, portid) */
};
408
409 static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
410 struct tcf_walker *arg)
411 {
412 struct tcf_dump_args *a = (void *)arg;
413
414 return tcf_fill_node(a->skb, tp, n, NETLINK_CB(a->cb->skb).portid,
415 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER);
416 }
417
418 /* called with RTNL */
/* called with RTNL */
/* tc_dump_tfilter - RTM_GETTFILTER dump handler.
 *
 * Walks every filter chain on the requested device/qdisc/class and
 * emits one message per filter.  Resumption state lives in cb->args:
 * args[0] is the index of the tcf_proto in the chain, args[1] is
 * 1 + the number of nodes already dumped within that proto (0 means
 * the proto header itself has not been sent yet).
 */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int t;
	int s_t;	/* proto index to resume from */
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_proto *tp, **chain;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	unsigned long cl = 0;
	const struct Qdisc_class_ops *cops;
	struct tcf_dump_args arg;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;
	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return skb->len;

	if (!tcm->tcm_parent)
		q = dev->qdisc;
	else
		q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
	if (!q)
		goto out;
	cops = q->ops->cl_ops;
	if (!cops)
		goto errout;
	if (cops->tcf_chain == NULL)
		goto errout;
	if (TC_H_MIN(tcm->tcm_parent)) {
		/* Filters attached to a specific class: take a class ref. */
		cl = cops->get(q, tcm->tcm_parent);
		if (cl == 0)
			goto errout;
	}
	chain = cops->tcf_chain(q, cl);
	if (chain == NULL)
		goto errout;

	s_t = cb->args[0];

	for (tp = *chain, t = 0; tp; tp = tp->next, t++) {
		if (t < s_t)
			continue;
		/* Optional prio/protocol filtering requested by the user. */
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		/* Moving past the resume point: clear per-proto state. */
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			/* Emit the proto-tcf header message first. */
			if (tcf_fill_node(skb, tp, 0, NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER) <= 0)
				break;

			cb->args[1] = 1;
		}
		if (tp->ops->walk == NULL)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;	/* nodes already dumped */
		arg.w.count = 0;
		tp->ops->walk(tp, &arg.w);
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			break;	/* skb full; resume here next call */
	}

	cb->args[0] = t;

errout:
	if (cl)
		cops->put(q, cl);
out:
	return skb->len;
}
501
/* Release the action (if any) bound to a filter's extensions. */
void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *act = exts->action;

	if (act != NULL) {
		tcf_action_destroy(act, TCA_ACT_UNBIND);
		exts->action = NULL;
	}
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);
512
/* tcf_exts_validate - parse and instantiate a filter's action attributes.
 *
 * Fills @exts from the attribute table @tb according to @map: an old
 * style "police" attribute takes precedence over the generic action
 * attribute.  Returns 0 on success or a negative errno; without
 * CONFIG_NET_CLS_ACT any action/police attribute is rejected with
 * -EOPNOTSUPP.
 */
int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts,
		      const struct tcf_ext_map *map)
{
	memset(exts, 0, sizeof(*exts));

#ifdef CONFIG_NET_CLS_ACT
	{
		struct tc_action *act;

		if (map->police && tb[map->police]) {
			/* Legacy policer: forced to the "police" action and
			 * tagged TCA_OLD_COMPAT so dumping round-trips.
			 */
			act = tcf_action_init_1(net, tb[map->police], rate_tlv,
						"police", TCA_ACT_NOREPLACE,
						TCA_ACT_BIND);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = TCA_OLD_COMPAT;
			exts->action = act;
		} else if (map->action && tb[map->action]) {
			act = tcf_action_init(net, tb[map->action], rate_tlv,
					      NULL, TCA_ACT_NOREPLACE,
					      TCA_ACT_BIND);
			if (IS_ERR(act))
				return PTR_ERR(act);

			exts->action = act;
		}
	}
#else
	if ((map->action && tb[map->action]) ||
	    (map->police && tb[map->police]))
		return -EOPNOTSUPP;
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);
551
/* tcf_exts_change - atomically swap in a new action for a filter.
 *
 * The pointer exchange is done under tcf_tree_lock so classification
 * never sees a half-updated filter; the displaced action is destroyed
 * only after the lock is dropped.
 */
void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
		     struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	if (src->action) {
		struct tc_action *act;
		tcf_tree_lock(tp);
		act = dst->action;
		dst->action = src->action;
		tcf_tree_unlock(tp);
		if (act)
			tcf_action_destroy(act, TCA_ACT_UNBIND);
	}
#endif
}
EXPORT_SYMBOL(tcf_exts_change);
568
/* tcf_exts_dump - dump a filter's action attributes into @skb.
 *
 * New-style actions go under map->action; actions created from the
 * legacy police attribute (type TCA_OLD_COMPAT) are dumped back under
 * map->police.  Returns 0 on success, -1 on failure (caller trims).
 */
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts,
		  const struct tcf_ext_map *map)
{
#ifdef CONFIG_NET_CLS_ACT
	if (map->action && exts->action) {
		/*
		 * again for backward compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		struct nlattr *nest;

		if (exts->action->type != TCA_OLD_COMPAT) {
			nest = nla_nest_start(skb, map->action);
			if (nest == NULL)
				goto nla_put_failure;
			if (tcf_action_dump(skb, exts->action, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (map->police) {
			nest = nla_nest_start(skb, map->police);
			if (nest == NULL)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, exts->action, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
#endif
	return 0;
/* label unused when CONFIG_NET_CLS_ACT is off */
nla_put_failure: __attribute__ ((unused))
	return -1;
}
EXPORT_SYMBOL(tcf_exts_dump);
603
604
/* Append the bound action's statistics to @skb; 0 on success, -1 on
 * failure.  A no-op (success) without CONFIG_NET_CLS_ACT.
 */
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts,
			const struct tcf_ext_map *map)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->action &&
	    tcf_action_copy_stats(skb, exts->action, 1) < 0)
		goto nla_put_failure;
#endif
	return 0;
nla_put_failure: __attribute__ ((unused))
	return -1;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);
618
/* Register the rtnetlink handlers for filter add/delete/get; the GET
 * entry also supplies the dump callback.
 */
static int __init tc_filter_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
		      tc_dump_tfilter, NULL);

	return 0;
}

subsys_initcall(tc_filter_init);