net_sched: remove generic throttled management
authorEric Dumazet <edumazet@google.com>
Fri, 10 Jun 2016 23:41:39 +0000 (16:41 -0700)
committerDavid S. Miller <davem@davemloft.net>
Sat, 11 Jun 2016 06:58:21 +0000 (23:58 -0700)
__QDISC_STATE_THROTTLED bit manipulation is rather expensive
for HTB and a few others.

I already removed it for sch_fq in commit f2600cf02b5b
("net: sched: avoid costly atomic operation in fq_dequeue()")
and so far nobody complained.

When one or more packets are stuck in one or more throttled
HTB classes, an htb dequeue() performs two atomic operations
to clear/set the __QDISC_STATE_THROTTLED bit, while the root qdisc
lock is held.
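
For reference, those helpers are just atomic bit operations on
qdisc->state; a minimal sketch of the pattern being removed
(mirroring the sch_generic.h hunk below):

	/* sketch of the removed helpers, as deleted from sch_generic.h */
	static inline void qdisc_throttled(struct Qdisc *qdisc)
	{
		set_bit(__QDISC_STATE_THROTTLED, &qdisc->state);	/* atomic RMW */
	}

	static inline void qdisc_unthrottled(struct Qdisc *qdisc)
	{
		clear_bit(__QDISC_STATE_THROTTLED, &qdisc->state);	/* atomic RMW */
	}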

Removing this pair of atomic operations brings me an 8 % performance
increase on 200 TCP_RR tests, in the presence of throttled classes.

This patch has no side effect, since nothing actually uses
qdisc_is_throttled() anymore.
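
With the throttle argument gone, callers simply pass the expiry time;
a sketch of the resulting call, as in the sch_htb hunk below:

	/* after this patch, scheduling the watchdog takes only the expiry time */
	qdisc_watchdog_schedule_ns(&q->watchdog, next_event);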

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/pkt_sched.h
include/net/sch_generic.h
net/sched/sch_api.c
net/sched/sch_cbq.c
net/sched/sch_fq.c
net/sched/sch_hfsc.c
net/sched/sch_htb.c
net/sched/sch_netem.c
net/sched/sch_tbf.c

index fea53f4d92ca6bb16c367ee772d4c8c1aa19b701..7caa99b482c6352d36a8d383e4af2fded040831c 100644 (file)
@@ -67,12 +67,12 @@ struct qdisc_watchdog {
 };
 
 void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
-void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle);
+void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);
 
 static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
                                           psched_time_t expires)
 {
-       qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires), true);
+       qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
 }
 
 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
index 9f3581980c154941b702158b5013cb3e51c2362b..9a0d177884c6472a06cb9670da9b5561e387376e 100644 (file)
@@ -26,7 +26,6 @@ struct qdisc_rate_table {
 enum qdisc_state_t {
        __QDISC_STATE_SCHED,
        __QDISC_STATE_DEACTIVATED,
-       __QDISC_STATE_THROTTLED,
 };
 
 struct qdisc_size_table {
@@ -125,21 +124,6 @@ static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
 #endif
 }
 
-static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
-{
-       return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false;
-}
-
-static inline void qdisc_throttled(struct Qdisc *qdisc)
-{
-       set_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
-}
-
-static inline void qdisc_unthrottled(struct Qdisc *qdisc)
-{
-       clear_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
-}
-
 struct Qdisc_class_ops {
        /* Child qdisc manipulation */
        struct netdev_queue *   (*select_queue)(struct Qdisc *, struct tcmsg *);
index d4a8bbfcc9530c305a77b349bb30d6c19b6ca2f1..401eda6de682b9ba31e9178fccf29b8d162a0e23 100644 (file)
@@ -583,7 +583,6 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
                                                 timer);
 
        rcu_read_lock();
-       qdisc_unthrottled(wd->qdisc);
        __netif_schedule(qdisc_root(wd->qdisc));
        rcu_read_unlock();
 
@@ -598,15 +597,12 @@ void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
 }
 EXPORT_SYMBOL(qdisc_watchdog_init);
 
-void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle)
+void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
 {
        if (test_bit(__QDISC_STATE_DEACTIVATED,
                     &qdisc_root_sleeping(wd->qdisc)->state))
                return;
 
-       if (throttle)
-               qdisc_throttled(wd->qdisc);
-
        if (wd->last_expires == expires)
                return;
 
@@ -620,7 +616,6 @@ EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
 {
        hrtimer_cancel(&wd->timer);
-       qdisc_unthrottled(wd->qdisc);
 }
 EXPORT_SYMBOL(qdisc_watchdog_cancel);
 
index 6e61f9aa8783977e386423fd1cddc20aa0860f34..a29fd811d7b9746ef21b3e94081cf18effaf37c4 100644 (file)
@@ -513,7 +513,6 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
                hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
        }
 
-       qdisc_unthrottled(sch);
        __netif_schedule(qdisc_root(sch));
        return HRTIMER_NORESTART;
 }
@@ -819,7 +818,6 @@ cbq_dequeue(struct Qdisc *sch)
                if (skb) {
                        qdisc_bstats_update(sch, skb);
                        sch->q.qlen--;
-                       qdisc_unthrottled(sch);
                        return skb;
                }
 
index 3c6a47d66a047da8765c9e90b808629a87afc0ea..f49c81e91acd6b35ccd565f5ae816584701a828c 100644 (file)
@@ -445,8 +445,7 @@ begin:
                if (!head->first) {
                        if (q->time_next_delayed_flow != ~0ULL)
                                qdisc_watchdog_schedule_ns(&q->watchdog,
-                                                          q->time_next_delayed_flow,
-                                                          false);
+                                                          q->time_next_delayed_flow);
                        return NULL;
                }
        }
index eb3d3f5aba80311462bb9ec18df49bcccf0fecaa..bd08c363a26d80b62c8fc576249ca301446d24f2 100644 (file)
@@ -1664,7 +1664,6 @@ hfsc_dequeue(struct Qdisc *sch)
                set_passive(cl);
        }
 
-       qdisc_unthrottled(sch);
        qdisc_bstats_update(sch, skb);
        qdisc_qstats_backlog_dec(sch, skb);
        sch->q.qlen--;
index b74d06668ab4d74c63027cab9cedf7a37f6d027a..07dcd2933f017eaea35dd159c27c66088ab5c3d5 100644 (file)
@@ -889,7 +889,6 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
        if (skb != NULL) {
 ok:
                qdisc_bstats_update(sch, skb);
-               qdisc_unthrottled(sch);
                qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;
                return skb;
@@ -929,7 +928,7 @@ ok:
        }
        qdisc_qstats_overlimit(sch);
        if (likely(next_event > q->now))
-               qdisc_watchdog_schedule_ns(&q->watchdog, next_event, true);
+               qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
        else
                schedule_work(&q->work);
 fin:
index 2dbe732ca135d07567326e4c3b018f0266ee91c9..876df13c745a9cc40a059af5fe7112dca6881972 100644 (file)
@@ -587,7 +587,6 @@ tfifo_dequeue:
        if (skb) {
                qdisc_qstats_backlog_dec(sch, skb);
 deliver:
-               qdisc_unthrottled(sch);
                qdisc_bstats_update(sch, skb);
                return skb;
        }
index 7fa3d6e1291c1f05bce726fadfbb8843457c4392..c12df84d1078c5b1a6bd40bba49446f7cc11ece2 100644 (file)
@@ -254,14 +254,12 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
                        q->ptokens = ptoks;
                        qdisc_qstats_backlog_dec(sch, skb);
                        sch->q.qlen--;
-                       qdisc_unthrottled(sch);
                        qdisc_bstats_update(sch, skb);
                        return skb;
                }
 
                qdisc_watchdog_schedule_ns(&q->watchdog,
-                                          now + max_t(long, -toks, -ptoks),
-                                          true);
+                                          now + max_t(long, -toks, -ptoks));
 
                /* Maybe we have a shorter packet in the queue,
                   which can be sent now. It sounds cool,