/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/lockdep.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new);
static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event);

/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. Queueing disciplines manager frontend.
   2. Traffic classes manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box,
   which is able to enqueue packets and to dequeue them (when the
   device is ready to send something) in the order and at the times
   determined by the algorithm hidden inside it.

   qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from the outside.
   - "schedulers", which split all the packets into "traffic classes",
     using "packet classifiers" (see cls_api.c)

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them etc. etc. etc.

   The goal of the routines in this file is to translate
   the information supplied by the user in the form of handles
   into a form more intelligible to the kernel, to make some sanity
   checks and the part of the work that is common to all qdiscs,
   and to provide rtnetlink notifications.

   All the real intelligent work is done inside qdisc modules.


   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns a skb to send. It is allowed to return NULL,
   but it does not mean that the queue is empty, it just means that the
   discipline does not want to send anything this time.
   The queue is really empty if q->q.qlen == 0.
   For complicated disciplines with multiple queues q->q is not a
   real packet queue, but q->q.qlen must nevertheless be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If the packet (this one or another one) was dropped, it returns
   a non-zero error code:
   NET_XMIT_DROP	- this packet was dropped.
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN		- probably this packet was enqueued, but another one was dropped.
     Expected action: back off or ignore.
   NET_XMIT_POLICED	- dropped by police.
     Expected action: back off or report an error to real-time apps.

   Auxiliary routines:

   ---peek

   like dequeue but without removing a packet from the queue

   ---reset

   returns qdisc to initial state: purge all buffers, clear all
   timers, counters (except for statistics) etc.

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys resources allocated by init and during the lifetime of the qdisc.

   ---change

   changes qdisc parameters.
 */

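/* A minimal sketch (illustration only, not part of this file) of how the
 * routines described above fit together: a trivial FIFO-like discipline
 * only has to supply enqueue/dequeue/peek and can borrow the stock helpers
 * from <net/sch_generic.h>.  The "example" name is hypothetical.
 */
#if 0	/* illustration only */
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	/* 0 (NET_XMIT_SUCCESS) means "enqueued"; see the table above. */
	return qdisc_enqueue_tail(skb, sch);
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	/* NULL only means "nothing to send right now"; emptiness is
	 * signalled by sch->q.qlen == 0. */
	return qdisc_dequeue_head(sch);
}

static struct Qdisc_ops example_qdisc_ops __read_mostly = {
	.id		= "example",
	.priv_size	= 0,
	.enqueue	= example_enqueue,
	.dequeue	= example_dequeue,
	.peek		= qdisc_peek_head,
	.owner		= THIS_MODULE,
};
#endif
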
/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);

/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;

/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->peek == NULL) {
		if (qops->dequeue == NULL)
			qops->peek = noop_qdisc_ops.peek;
		else
			goto out_einval;
	}
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	if (qops->cl_ops) {
		const struct Qdisc_class_ops *cops = qops->cl_ops;

		if (!(cops->get && cops->put && cops->walk && cops->leaf))
			goto out_einval;

		if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
			goto out_einval;
	}

	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;

out_einval:
	rc = -EINVAL;
	goto out;
}
EXPORT_SYMBOL(register_qdisc);

int unregister_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int err = -ENOENT;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (q == qops)
			break;
	if (q) {
		*qp = q->next;
		q->next = NULL;
		err = 0;
	}
	write_unlock(&qdisc_mod_lock);
	return err;
}
EXPORT_SYMBOL(unregister_qdisc);

/* Get default qdisc if not otherwise specified */
void qdisc_get_default(char *name, size_t len)
{
	read_lock(&qdisc_mod_lock);
	strlcpy(name, default_qdisc_ops->id, len);
	read_unlock(&qdisc_mod_lock);
}

static struct Qdisc_ops *qdisc_lookup_default(const char *name)
{
	struct Qdisc_ops *q = NULL;

	for (q = qdisc_base; q; q = q->next) {
		if (!strcmp(name, q->id)) {
			if (!try_module_get(q->owner))
				q = NULL;
			break;
		}
	}

	return q;
}

/* Set new default qdisc to use */
int qdisc_set_default(const char *name)
{
	const struct Qdisc_ops *ops;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	write_lock(&qdisc_mod_lock);
	ops = qdisc_lookup_default(name);
	if (!ops) {
		/* Not found, drop lock and try to load module */
		write_unlock(&qdisc_mod_lock);
		request_module("sch_%s", name);
		write_lock(&qdisc_mod_lock);

		ops = qdisc_lookup_default(name);
	}

	if (ops) {
		/* Set new default */
		module_put(default_qdisc_ops->owner);
		default_qdisc_ops = ops;
	}
	write_unlock(&qdisc_mod_lock);

	return ops ? 0 : -ENOENT;
}

/* We know handle. Find qdisc among all qdisc's attached to device
 * (root qdisc, all its children, children of children etc.)
 * Note: caller either uses rtnl or rcu_read_lock()
 */
static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
	struct Qdisc *q;

	if (!(root->flags & TCQ_F_BUILTIN) &&
	    root->handle == handle)
		return root;

	list_for_each_entry_rcu(q, &root->list, list) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}

void qdisc_list_add(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		struct Qdisc *root = qdisc_dev(q)->qdisc;

		WARN_ON_ONCE(root == &noop_qdisc);
		ASSERT_RTNL();
		list_add_tail_rcu(&q->list, &root->list);
	}
}
EXPORT_SYMBOL(qdisc_list_add);

void qdisc_list_del(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		ASSERT_RTNL();
		list_del_rcu(&q->list);
	}
}
EXPORT_SYMBOL(qdisc_list_del);

struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	struct Qdisc *q;

	q = qdisc_match_from_root(dev->qdisc, handle);
	if (q)
		goto out;

	if (dev_ingress_queue(dev))
		q = qdisc_match_from_root(
			dev_ingress_queue(dev)->qdisc_sleeping,
			handle);
out:
	return q;
}

static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = p->ops->cl_ops;

	if (cops == NULL)
		return NULL;
	cl = cops->get(p, classid);

	if (cl == 0)
		return NULL;
	leaf = cops->leaf(p, cl);
	cops->put(p, cl);
	return leaf;
}

/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
	struct Qdisc_ops *q = NULL;

	if (kind) {
		read_lock(&qdisc_mod_lock);
		for (q = qdisc_base; q; q = q->next) {
			if (nla_strcmp(kind, q->id) == 0) {
				if (!try_module_get(q->owner))
					q = NULL;
				break;
			}
		}
		read_unlock(&qdisc_mod_lock);
	}
	return q;
}

/* The linklayer setting was not transferred from older iproute2 versions,
 * and the rate table lookup system has been dropped from the kernel. To
 * keep backward compatibility with older iproute2 tc utils, we detect the
 * linklayer setting by detecting whether the rate table was modified.
 *
 * For linklayer ATM table entries, the rate table will be aligned to
 * 48 bytes, thus some table entries will contain the same value.  The
 * mpu (min packet unit) is also encoded into the old rate table, thus
 * starting from the mpu, we find the low and high table entries for
 * mapping this cell.  If these entries contain the same value, then
 * the rate table has been modified for linklayer ATM.
 *
 * This is done by rounding mpu to the nearest 48-byte cell/entry,
 * then rounding up to the next cell, calculating the table entry one
 * below, and comparing.
 */
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
{
	int low       = roundup(r->mpu, 48);
	int high      = roundup(low+1, 48);
	int cell_low  = low >> r->cell_log;
	int cell_high = (high >> r->cell_log) - 1;

	/* rtab is too inaccurate at rates > 100Mbit/s */
	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
		pr_debug("TC linklayer: Giving up ATM detection\n");
		return TC_LINKLAYER_ETHERNET;	/* Safe value */
	}

	if ((cell_high > cell_low) && (cell_high < 256)
	    && (rtab[cell_low] == rtab[cell_high])) {
		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
			 cell_low, cell_high, rtab[cell_high]);
		return TC_LINKLAYER_ATM;
	}
	return TC_LINKLAYER_ETHERNET;
}

static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
{
	struct qdisc_rate_table *rtab;

	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
	    nla_len(tab) != TC_RTAB_SIZE)
		return NULL;

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
			rtab->refcnt++;
			return rtab;
		}
	}

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
		if (r->linklayer == TC_LINKLAYER_UNAWARE)
			r->linklayer = __detect_linklayer(r, rtab->data);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);

void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)
		return;

	for (rtabp = &qdisc_rtab_list;
	     (rtab = *rtabp) != NULL;
	     rtabp = &rtab->next) {
		if (rtab == tab) {
			*rtabp = rtab->next;
			kfree(rtab);
			return;
		}
	}
}
EXPORT_SYMBOL(qdisc_put_rtab);

static LIST_HEAD(qdisc_stab_list);
static DEFINE_SPINLOCK(qdisc_stab_lock);

static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
	[TCA_STAB_DATA] = { .type = NLA_BINARY },
};

static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
{
	struct nlattr *tb[TCA_STAB_MAX + 1];
	struct qdisc_size_table *stab;
	struct tc_sizespec *s;
	unsigned int tsize = 0;
	u16 *tab = NULL;
	int err;

	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
	if (err < 0)
		return ERR_PTR(err);
	if (!tb[TCA_STAB_BASE])
		return ERR_PTR(-EINVAL);

	s = nla_data(tb[TCA_STAB_BASE]);

	if (s->tsize > 0) {
		if (!tb[TCA_STAB_DATA])
			return ERR_PTR(-EINVAL);
		tab = nla_data(tb[TCA_STAB_DATA]);
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
	}

	if (tsize != s->tsize || (!tab && tsize > 0))
		return ERR_PTR(-EINVAL);

	spin_lock(&qdisc_stab_lock);

	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
			continue;
		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
			continue;
		stab->refcnt++;
		spin_unlock(&qdisc_stab_lock);
		return stab;
	}

	spin_unlock(&qdisc_stab_lock);

	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	stab->refcnt = 1;
	stab->szopts = *s;
	if (tsize > 0)
		memcpy(stab->data, tab, tsize * sizeof(u16));

	spin_lock(&qdisc_stab_lock);
	list_add_tail(&stab->list, &qdisc_stab_list);
	spin_unlock(&qdisc_stab_lock);

	return stab;
}

static void stab_kfree_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct qdisc_size_table, rcu));
}

void qdisc_put_stab(struct qdisc_size_table *tab)
{
	if (!tab)
		return;

	spin_lock(&qdisc_stab_lock);

	if (--tab->refcnt == 0) {
		list_del(&tab->list);
		call_rcu_bh(&tab->rcu, stab_kfree_rcu);
	}

	spin_unlock(&qdisc_stab_lock);
}
EXPORT_SYMBOL(qdisc_put_stab);

static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_STAB);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	return -1;
}

void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
{
	int pkt_len, slot;

	pkt_len = skb->len + stab->szopts.overhead;
	if (unlikely(!stab->szopts.tsize))
		goto out;

	slot = pkt_len + stab->szopts.cell_align;
	if (unlikely(slot < 0))
		slot = 0;

	slot >>= stab->szopts.cell_log;
	if (likely(slot < stab->szopts.tsize))
		pkt_len = stab->data[slot];
	else
		pkt_len = stab->data[stab->szopts.tsize - 1] *
				(slot / stab->szopts.tsize) +
				stab->data[slot % stab->szopts.tsize];

	pkt_len <<= stab->szopts.size_log;
out:
	if (unlikely(pkt_len < 1))
		pkt_len = 1;
	qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(__qdisc_calculate_pkt_len);

void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
{
	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
			txt, qdisc->ops->id, qdisc->handle >> 16);
		qdisc->flags |= TCQ_F_WARN_NONWC;
	}
}
EXPORT_SYMBOL(qdisc_warn_nonwc);

static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

	rcu_read_lock();
	qdisc_unthrottled(wd->qdisc);
	__netif_schedule(qdisc_root(wd->qdisc));
	rcu_read_unlock();

	return HRTIMER_NORESTART;
}

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	wd->timer.function = qdisc_watchdog;
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init);

void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle)
{
	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(wd->qdisc)->state))
		return;

	if (throttle)
		qdisc_throttled(wd->qdisc);

	hrtimer_start(&wd->timer,
		      ns_to_ktime(expires),
		      HRTIMER_MODE_ABS_PINNED);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
	qdisc_unthrottled(wd->qdisc);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);

static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head), i;
	struct hlist_head *h;

	if (size <= PAGE_SIZE)
		h = kmalloc(size, GFP_KERNEL);
	else
		h = (struct hlist_head *)
			__get_free_pages(GFP_KERNEL, get_order(size));

	if (h != NULL) {
		for (i = 0; i < n; i++)
			INIT_HLIST_HEAD(&h[i]);
	}
	return h;
}

static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head);

	if (size <= PAGE_SIZE)
		kfree(h);
	else
		free_pages((unsigned long)h, get_order(size));
}

void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash     = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	qdisc_class_hash_free(ohash, osize);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);

int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
	unsigned int size = 4;

	clhash->hash = qdisc_class_hash_alloc(size);
	if (clhash->hash == NULL)
		return -ENOMEM;
	clhash->hashsize  = size;
	clhash->hashmask  = size - 1;
	clhash->hashelems = 0;
	return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
	qdisc_class_hash_free(clhash->hash, clhash->hashsize);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);

void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	unsigned int h;

	INIT_HLIST_NODE(&cl->hnode);
	h = qdisc_class_hash(cl->classid, clhash->hashmask);
	hlist_add_head(&cl->hnode, &clhash->hash[h]);
	clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	hlist_del(&cl->hnode);
	clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);

/* Allocate an unique handle from space managed by kernel
 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 */
static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x8000;
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
		if (!qdisc_lookup(dev, autohandle))
			return autohandle;
		cond_resched();
	} while	(--i > 0);

	return 0;
}

*sch
, unsigned int n
,
750 const struct Qdisc_class_ops
*cops
;
755 if (n
== 0 && len
== 0)
757 drops
= max_t(int, n
, 0);
759 while ((parentid
= sch
->parent
)) {
760 if (TC_H_MAJ(parentid
) == TC_H_MAJ(TC_H_INGRESS
))
763 if (sch
->flags
& TCQ_F_NOPARENT
)
765 /* TODO: perform the search on a per txq basis */
766 sch
= qdisc_lookup(qdisc_dev(sch
), TC_H_MAJ(parentid
));
768 WARN_ON_ONCE(parentid
!= TC_H_ROOT
);
771 cops
= sch
->ops
->cl_ops
;
772 if (cops
->qlen_notify
) {
773 cl
= cops
->get(sch
, parentid
);
774 cops
->qlen_notify(sch
, cl
);
778 sch
->qstats
.backlog
-= len
;
779 __qdisc_qstats_drop(sch
, drops
);
783 EXPORT_SYMBOL(qdisc_tree_reduce_backlog
);
static void notify_and_destroy(struct net *net, struct sk_buff *skb,
			       struct nlmsghdr *n, u32 clid,
			       struct Qdisc *old, struct Qdisc *new)
{
	if (new || old)
		qdisc_notify(net, skb, n, clid, old, new);

	if (old)
		qdisc_destroy(old);
}

/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate send a netlink notification using 'skb'
 * and "n".
 *
 * On success, destroy old qdisc.
 */
static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
		       struct Qdisc *new, struct Qdisc *old)
{
	struct Qdisc *q = old;
	struct net *net = dev_net(dev);
	int err = 0;

	if (parent == NULL) {
		unsigned int i, num_q, ingress;

		ingress = 0;
		num_q = dev->num_tx_queues;
		if ((q && q->flags & TCQ_F_INGRESS) ||
		    (new && new->flags & TCQ_F_INGRESS)) {
			num_q = 1;
			ingress = 1;
			if (!dev_ingress_queue(dev))
				return -ENOENT;
		}

		if (dev->flags & IFF_UP)
			dev_deactivate(dev);

		if (new && new->ops->attach)
			goto skip;

		for (i = 0; i < num_q; i++) {
			struct netdev_queue *dev_queue = dev_ingress_queue(dev);

			if (!ingress)
				dev_queue = netdev_get_tx_queue(dev, i);

			old = dev_graft_qdisc(dev_queue, new);
			if (new && i > 0)
				atomic_inc(&new->refcnt);

			if (!ingress)
				qdisc_destroy(old);
		}

skip:
		if (!ingress) {
			notify_and_destroy(net, skb, n, classid,
					   dev->qdisc, new);
			if (new && !new->ops->attach)
				atomic_inc(&new->refcnt);
			dev->qdisc = new ? : &noop_qdisc;

			if (new && new->ops->attach)
				new->ops->attach(new);
		} else {
			notify_and_destroy(net, skb, n, classid, old, new);
		}

		if (dev->flags & IFF_UP)
			dev_activate(dev);
	} else {
		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;

		err = -EOPNOTSUPP;
		if (cops && cops->graft) {
			unsigned long cl = cops->get(parent, classid);
			if (cl) {
				err = cops->graft(parent, cl, new, &old);
				cops->put(parent, cl);
			} else
				err = -ENOENT;
		}
		if (!err)
			notify_and_destroy(net, skb, n, classid, old, new);
	}
	return err;
}

/* lockdep annotation is needed for ingress; egress gets it only for name */
static struct lock_class_key qdisc_tx_lock;
static struct lock_class_key qdisc_rx_lock;

/*
   Allocate and initialize new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *
qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
	     struct Qdisc *p, u32 parent, u32 handle,
	     struct nlattr **tca, int *errp)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];
		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load.  So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request.  We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the mean time.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* We will try again qdisc_lookup_ops,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (ops == NULL)
		goto err_out;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
	} else {
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			err = -ENOMEM;
			if (handle == 0)
				goto err_out3;
		}
		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
		if (!netif_is_multiqueue(dev))
			sch->flags |= TCQ_F_ONETXQUEUE;
	}

	sch->handle = handle;

	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
		if (qdisc_is_percpu_stats(sch)) {
			sch->cpu_bstats =
				netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
			if (!sch->cpu_bstats)
				goto err_out4;

			sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
			if (!sch->cpu_qstats)
				goto err_out4;
		}

		if (tca[TCA_STAB]) {
			stab = qdisc_get_stab(tca[TCA_STAB]);
			if (IS_ERR(stab)) {
				err = PTR_ERR(stab);
				goto err_out4;
			}
			rcu_assign_pointer(sch->stab, stab);
		}
		if (tca[TCA_RATE]) {
			spinlock_t *root_lock;

			err = -EOPNOTSUPP;
			if (sch->flags & TCQ_F_MQROOT)
				goto err_out4;

			if ((sch->parent != TC_H_ROOT) &&
			    !(sch->flags & TCQ_F_INGRESS) &&
			    (!p || !(p->flags & TCQ_F_MQROOT)))
				root_lock = qdisc_root_sleeping_lock(sch);
			else
				root_lock = qdisc_lock(sch);

			err = gen_new_estimator(&sch->bstats,
						sch->cpu_bstats,
						&sch->rate_est,
						root_lock,
						tca[TCA_RATE]);
			if (err)
				goto err_out4;
		}

		qdisc_list_add(sch);

		return sch;
	}
	/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
	if (ops->destroy)
		ops->destroy(sch);
err_out3:
	dev_put(dev);
	kfree((char *) sch - sch->padded);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;

err_out4:
	free_percpu(sch->cpu_bstats);
	free_percpu(sch->cpu_qstats);
	/*
	 * Any broken qdiscs that would require a ops->reset() here?
	 * The qdisc was never in action so it shouldn't be necessary.
	 */
	qdisc_put_stab(rtnl_dereference(sch->stab));
	if (ops->destroy)
		ops->destroy(sch);
	goto err_out3;
}

static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
{
	struct qdisc_size_table *ostab, *stab = NULL;
	int err = 0;

	if (tca[TCA_OPTIONS]) {
		if (sch->ops->change == NULL)
			return -EINVAL;
		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
		if (err)
			return err;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB]);
		if (IS_ERR(stab))
			return PTR_ERR(stab);
	}

	ostab = rtnl_dereference(sch->stab);
	rcu_assign_pointer(sch->stab, stab);
	qdisc_put_stab(ostab);

	if (tca[TCA_RATE]) {
		/* NB: ignores errors from replace_estimator
		   because change can't be undone. */
		if (sch->flags & TCQ_F_MQROOT)
			goto out;
		gen_replace_estimator(&sch->bstats,
				      sch->cpu_bstats,
				      &sch->rate_est,
				      qdisc_root_sleeping_lock(sch),
				      tca[TCA_RATE]);
	}
out:
	return 0;
}

{
1071 struct qdisc_walker w
;
1076 static int check_loop_fn(struct Qdisc
*q
, unsigned long cl
, struct qdisc_walker
*w
);
1078 static int check_loop(struct Qdisc
*q
, struct Qdisc
*p
, int depth
)
1080 struct check_loop_arg arg
;
1082 if (q
->ops
->cl_ops
== NULL
)
1085 arg
.w
.stop
= arg
.w
.skip
= arg
.w
.count
= 0;
1086 arg
.w
.fn
= check_loop_fn
;
1089 q
->ops
->cl_ops
->walk(q
, &arg
.w
);
1090 return arg
.w
.stop
? -ELOOP
: 0;
1094 check_loop_fn(struct Qdisc
*q
, unsigned long cl
, struct qdisc_walker
*w
)
1097 const struct Qdisc_class_ops
*cops
= q
->ops
->cl_ops
;
1098 struct check_loop_arg
*arg
= (struct check_loop_arg
*)w
;
1100 leaf
= cops
->leaf(q
, cl
);
1102 if (leaf
== arg
->p
|| arg
->depth
> 7)
1104 return check_loop(leaf
, arg
->p
, arg
->depth
+ 1);
/*
 * Delete/get qdisc.
 */

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	if ((n->nlmsg_type != RTM_GETQDISC) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	clid = tcm->tcm_parent;
	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}
		if (!q)
			return -ENOENT;

		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
			return -EINVAL;
	} else {
		q = qdisc_lookup(dev, tcm->tcm_handle);
		if (!q)
			return -ENOENT;
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid)
			return -EINVAL;
		if (q->handle == 0)
			return -ENOENT;
		err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
		if (err != 0)
			return err;
	} else {
		qdisc_notify(net, skb, n, clid, NULL, q);
	}
	return 0;
}

/*
 * Enable/disable flow on qdisc.
 */
int tc_qdisc_flow_control(struct net_device *dev, u32 tcm_handle, int enable_flow)
{
	struct Qdisc *q;
	int qdisc_len = 0;
	struct __qdisc_change_req {
		struct nlattr attr;
		struct tc_prio_qopt data;
	} req = {
		.attr = {sizeof(struct __qdisc_change_req), TCA_OPTIONS},
		.data = {3, {1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1}, 1}
	};

	/* override flow bit */
	req.data.enable_flow = enable_flow;

	/* look up using tcm handle */
	q = qdisc_lookup(dev, tcm_handle);

	/* call registered change function */
	if (q) {
		if (q->ops->change(q, &(req.attr)) != 0)
			pr_err("tc_qdisc_flow_control: qdisc change failed\n");
		else
			qdisc_len = q->q.qlen;
	}
	return qdisc_len;
}
EXPORT_SYMBOL(tc_qdisc_flow_control);

/*
 * Create/change qdisc.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	/* Reinit, just in case something touches this. */
	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	tcm = nlmsg_data(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;


	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue_create(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}

		/* It may be default qdisc, ignore it */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
					return -EEXIST;
				if (TC_H_MIN(tcm->tcm_handle))
					return -EINVAL;
				q = qdisc_lookup(dev, tcm->tcm_handle);
				if (!q)
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL)
					return -EEXIST;
				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
					return -EINVAL;
				if (q == p ||
				    (p && check_loop(q, p, 0)))
					return -ELOOP;
				atomic_inc(&q->refcnt);
				goto graft;
			} else {
				if (!q)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 *   We know, that some child q is already
				 *   attached to this parent and have choice:
				 *   either to change it or to create/graft new one.
				 *
				 *   1. We are allowed to create/graft only
				 *   if CREATE and REPLACE flags are set.
				 *
				 *   2. If EXCL is set, requestor wanted to say,
				 *   that qdisc tcm_handle is not expected
				 *   to exist, so that we choose create/graft too.
				 *
				 *   3. The last case is when no flags are set.
				 *   Alas, it is sort of hole in API, we
				 *   cannot decide what to do unambiguously.
				 *   For now we select create/graft, if
				 *   user gave KIND, which does not match existing.
				 */
				if ((n->nlmsg_flags & NLM_F_CREATE) &&
				    (n->nlmsg_flags & NLM_F_REPLACE) &&
				    ((n->nlmsg_flags & NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle)
			return -EINVAL;
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (q == NULL)
		return -ENOENT;
	if (n->nlmsg_flags & NLM_F_EXCL)
		return -EEXIST;
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;
	err = qdisc_change(q, tca);
	if (err == 0)
		qdisc_notify(net, skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE))
		return -ENOENT;
	if (clid == TC_H_INGRESS) {
		if (dev_ingress_queue(dev))
			q = qdisc_create(dev, dev_ingress_queue(dev), p,
					 tcm->tcm_parent, tcm->tcm_parent,
					 tca, &err);
		else
			err = -ENOENT;
	} else {
		struct netdev_queue *dev_queue;

		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
		else if (p)
			dev_queue = p->dev_queue;
		else
			dev_queue = netdev_get_tx_queue(dev, 0);

		q = qdisc_create(dev, dev_queue, p,
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err);
	}
	if (q == NULL) {
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
	if (err) {
		if (q)
			qdisc_destroy(q);
		return err;
	}

	return 0;
}

static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 portid, u32 seq, u16 flags, int event)
{
	struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	struct qdisc_size_table *stab;
	__u32 qlen;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = atomic_read(&q->refcnt);
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	qlen = q->q.qlen;

	stab = rtnl_dereference(q->stab);
	if (stab && qdisc_dump_stab(skb, stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (qdisc_is_percpu_stats(q)) {
		cpu_bstats = q->cpu_bstats;
		cpu_qstats = q->cpu_qstats;
	}

	if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static bool tc_qdisc_dump_ignore(struct Qdisc *q)
{
	return (q->flags & TCQ_F_BUILTIN) ? true : false;
}

static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && !tc_qdisc_dump_ignore(old)) {
		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
				  0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new && !tc_qdisc_dump_ignore(new)) {
		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				      n->nlmsg_flags & NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}

static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx)
{
	int ret = 0, q_idx = *q_idx_p;
	struct Qdisc *q;

	if (!root)
		return 0;

	q = root;
	if (q_idx < s_q_idx) {
		q_idx++;
	} else {
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}
	list_for_each_entry(q, &root->list, list) {
		if (q_idx < s_q_idx) {
			q_idx++;
			continue;
		}
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

out:
	*q_idx_p = q_idx;
	return ret;
done:
	ret = -1;
	goto out;
}

static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];

	idx = 0;
	ASSERT_RTNL();
	for_each_netdev(net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
			goto done;

		dev_queue = dev_ingress_queue(dev);
		if (dev_queue &&
		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
				       &q_idx, s_q_idx) < 0)
			goto done;

cont:
		idx++;
	}

done:
	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}

/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/

static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 portid;
	u32 clid;
	u32 qid;
	int err;

	if ((n->nlmsg_type != RTM_GETTCLASS) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0	 - parent is root class.
	   parent == X:Y	 - parent is a node in hierarchy.
	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0	 - generate handle from kernel pool.
	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
	   handle == X:Y	 - clear.
	   handle == X:0	 - root class.
	 */

	/* Step 1. Determine qdisc handle X:0 */

	portid = tcm->tcm_parent;
	clid = tcm->tcm_handle;
	qid = TC_H_MAJ(clid);

	if (portid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(portid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev->qdisc->handle;

		/* Now qid is genuine qdisc handle consistent
		 * both with parent and child.
		 *
		 * TC_H_MAJ(portid) still may be unspecified, complete it now.
		 */
		if (portid)
			portid = TC_H_MAKE(qid, portid);
	} else {
		if (qid == 0)
			qid = dev->qdisc->handle;
	}

	/* OK. Locate qdisc */
	q = qdisc_lookup(dev, qid);
	if (!q)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (portid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->get(q, clid);

	if (cl == 0) {
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = -EOPNOTSUPP;
			if (cops->delete)
				err = cops->delete(q, cl);
			if (err == 0)
				tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	new_cl = cl;
	err = -EOPNOTSUPP;
	if (cops->change)
		err = cops->change(q, clid, portid, tca, &new_cl);
	if (err == 0)
		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);

out:
	if (cl)
		cops->put(q, cl);

	return err;
}

static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

struct qdisc_dump_args {
	struct qdisc_walker	w;
	struct sk_buff		*skb;
	struct netlink_callback	*cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
}

static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
				struct tcmsg *tcm, struct netlink_callback *cb,
				int *t_p, int s_t)
{
	struct qdisc_dump_args arg;

	if (tc_qdisc_dump_ignore(q) ||
	    *t_p < s_t || !q->ops->cl_ops ||
	    (tcm->tcm_parent &&
	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
		(*t_p)++;
		return 0;
	}
	if (*t_p > s_t)
		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
	arg.w.fn = qdisc_class_dump;
	arg.skb = skb;
	arg.cb = cb;
	arg.w.stop  = 0;
	arg.w.skip = cb->args[1];
	arg.w.count = 0;
	q->ops->cl_ops->walk(q, &arg.w);
	cb->args[1] = arg.w.count;
	if (arg.w.stop)
		return -1;
	(*t_p)++;
	return 0;
}

static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
			       struct tcmsg *tcm, struct netlink_callback *cb,
			       int *t_p, int s_t)
{
	struct Qdisc *q;

	if (!root)
		return 0;

	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
		return -1;

	list_for_each_entry(q, &root->list, list) {
		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
	}

	return 0;
}

static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return 0;
	dev = dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return 0;

	s_t = cb->args[0];
	t = 0;

	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
		goto done;

	dev_queue = dev_ingress_queue(dev);
	if (dev_queue &&
	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
				&t, s_t) < 0)
		goto done;

done:
	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		struct tcf_result *res, bool compat_mode)
{
	__be16 protocol = tc_skb_protocol(skb);
#ifdef CONFIG_NET_CLS_ACT
	const struct tcf_proto *old_tp = tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode))
			goto reset;
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= MAX_REC_LOOP)) {
		net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->q->ops->id, tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = old_tp;
	protocol = tc_skb_protocol(skb);
	goto reclassify;
#endif
}
EXPORT_SYMBOL(tc_classify);

bool tcf_destroy(struct tcf_proto *tp, bool force)
{
	if (tp->ops->destroy(tp, force)) {
		module_put(tp->ops->owner);
		kfree_rcu(tp, rcu);
		return true;
	}

	return false;
}

void tcf_destroy_chain(struct tcf_proto __rcu **fl)
{
	struct tcf_proto *tp;

	while ((tp = rtnl_dereference(*fl)) != NULL) {
		RCU_INIT_POINTER(*fl, tp->next);
		tcf_destroy(tp, true);
	}
}
EXPORT_SYMBOL(tcf_destroy_chain);

#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC / hrtimer_resolution);

	return 0;
}

static int psched_open(struct inode *inode, struct file *file)
{
	return single_open(file, psched_show, NULL);
}

static const struct file_operations psched_fops = {
	.owner = THIS_MODULE,
	.open = psched_open,
	.read  = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __net_init psched_net_init(struct net *net)
{
	struct proc_dir_entry *e;

	e = proc_create("psched", 0, net->proc_net, &psched_fops);
	if (e == NULL)
		return -ENOMEM;

	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
	remove_proc_entry("psched", net->proc_net);
}
#else
static int __net_init psched_net_init(struct net *net)
{
	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
}
#endif

static struct pernet_operations psched_net_ops = {
	.init = psched_net_init,
	.exit = psched_net_exit,
};

static int __init pktsched_init(void)
{
	int err;

	err = register_pernet_subsys(&psched_net_ops);
	if (err) {
		pr_err("pktsched_init: cannot initialize per netns operations\n");
		return err;
	}

	register_qdisc(&pfifo_fast_ops);
	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	register_qdisc(&pfifo_head_drop_qdisc_ops);
	register_qdisc(&mq_qdisc_ops);
	register_qdisc(&noqueue_qdisc_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, NULL);
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, NULL);

	return 0;
}

subsys_initcall(pktsched_init);