#define CODEL_DISABLED_THRESHOLD INT_MAX
-static void codel_params_init(struct codel_params *params,
- const struct Qdisc *sch)
+static void codel_params_init(struct codel_params *params)
{
params->interval = MS2TIME(100);
params->target = MS2TIME(5);
- params->mtu = psched_mtu(qdisc_dev(sch));
params->ce_threshold = CODEL_DISABLED_THRESHOLD;
params->ecn = false;
}
return t + reciprocal_scale(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT);
}
+typedef u32 (*codel_skb_len_t)(const struct sk_buff *skb);
+typedef codel_time_t (*codel_skb_time_t)(const struct sk_buff *skb);
+typedef void (*codel_skb_drop_t)(struct sk_buff *skb, void *ctx);
+typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars,
+ void *ctx);
+
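
The four callback types above are what decouple the algorithm from struct Qdisc: a caller that only has a private software queue can now supply its own helpers. The sketch below is purely illustrative; struct my_txq and every my_* name are assumptions made for this example, not anything introduced by the patch.

	#include <linux/skbuff.h>
	#include <net/codel.h>

	/* Hypothetical consumer, for illustration only: a driver-private
	 * tx queue that wants to run CoDel over its own skb list.
	 */
	struct my_txq {
		struct sk_buff_head	queue;
		u32			backlog;   /* bytes queued, passed as *backlog */
		struct codel_params	cparams;
		struct codel_vars	cvars;
		struct codel_stats	cstats;
	};

	static u32 my_skb_len(const struct sk_buff *skb)
	{
		return skb->len;
	}

	static codel_time_t my_skb_time(const struct sk_buff *skb)
	{
		/* assumes this consumer's enqueue path stored
		 * codel_get_time() at the start of skb->cb
		 */
		return *(const codel_time_t *)skb->cb;
	}
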
static bool codel_should_drop(const struct sk_buff *skb,
- struct Qdisc *sch,
+ void *ctx,
struct codel_vars *vars,
struct codel_params *params,
struct codel_stats *stats,
+ codel_skb_len_t skb_len_func,
+ codel_skb_time_t skb_time_func,
+ u32 *backlog,
codel_time_t now)
{
bool ok_to_drop;
+ u32 skb_len;
if (!skb) {
vars->first_above_time = 0;
return false;
}
- vars->ldelay = now - codel_get_enqueue_time(skb);
- sch->qstats.backlog -= qdisc_pkt_len(skb);
+ skb_len = skb_len_func(skb);
+ vars->ldelay = now - skb_time_func(skb);
- if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket))
- stats->maxpacket = qdisc_pkt_len(skb);
+ if (unlikely(skb_len > stats->maxpacket))
+ stats->maxpacket = skb_len;
if (codel_time_before(vars->ldelay, params->target) ||
- sch->qstats.backlog <= params->mtu) {
+ *backlog <= params->mtu) {
/* went below - stay below for at least interval */
vars->first_above_time = 0;
return false;
return ok_to_drop;
}
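
Note that the qstats update that used to live in codel_should_drop() is gone: keeping the byte backlog in sync is now the dequeue callback's job, which is exactly what the qdisc glue later in this patch does. Continuing the hypothetical my_txq sketch from above:

	static struct sk_buff *my_dequeue(struct codel_vars *vars, void *ctx)
	{
		struct my_txq *txq = ctx;
		struct sk_buff *skb = __skb_dequeue(&txq->queue);

		/* the callback, not generic codel, maintains the backlog */
		if (skb)
			txq->backlog -= skb->len;
		return skb;
	}
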
-typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars,
- struct Qdisc *sch);
-
-static struct sk_buff *codel_dequeue(struct Qdisc *sch,
+static struct sk_buff *codel_dequeue(void *ctx,
+ u32 *backlog,
struct codel_params *params,
struct codel_vars *vars,
struct codel_stats *stats,
+ codel_skb_len_t skb_len_func,
+ codel_skb_time_t skb_time_func,
+ codel_skb_drop_t drop_func,
codel_skb_dequeue_t dequeue_func)
{
- struct sk_buff *skb = dequeue_func(vars, sch);
+ struct sk_buff *skb = dequeue_func(vars, ctx);
codel_time_t now;
bool drop;
return skb;
}
now = codel_get_time();
- drop = codel_should_drop(skb, sch, vars, params, stats, now);
+ drop = codel_should_drop(skb, ctx, vars, params, stats,
+ skb_len_func, skb_time_func, backlog, now);
if (vars->dropping) {
if (!drop) {
/* sojourn time below target - leave dropping state */
vars->rec_inv_sqrt);
goto end;
}
- stats->drop_len += qdisc_pkt_len(skb);
- qdisc_drop(skb, sch);
+ stats->drop_len += skb_len_func(skb);
+ drop_func(skb, ctx);
stats->drop_count++;
- skb = dequeue_func(vars, sch);
- if (!codel_should_drop(skb, sch,
- vars, params, stats, now)) {
+ skb = dequeue_func(vars, ctx);
+ if (!codel_should_drop(skb, ctx,
+ vars, params, stats,
+ skb_len_func,
+ skb_time_func,
+ backlog, now)) {
/* leave dropping state */
vars->dropping = false;
} else {
if (params->ecn && INET_ECN_set_ce(skb)) {
stats->ecn_mark++;
} else {
- stats->drop_len += qdisc_pkt_len(skb);
- qdisc_drop(skb, sch);
+ stats->drop_len += skb_len_func(skb);
+ drop_func(skb, ctx);
stats->drop_count++;
- skb = dequeue_func(vars, sch);
- drop = codel_should_drop(skb, sch, vars, params,
- stats, now);
+ skb = dequeue_func(vars, ctx);
+ drop = codel_should_drop(skb, ctx, vars, params,
+ stats, skb_len_func,
+ skb_time_func, backlog, now);
}
vars->dropping = true;
/* if min went above target close to when we last went below it
- * to dequeue a packet from queue. Note: backlog is handled in
- * codel, we dont need to reduce it here.
+ * to dequeue a packet from queue. Note: backlog is now reduced in
+ * this callback, since generic codel no longer touches qdisc state.
*/
-static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
+static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
+ struct Qdisc *sch = ctx;
struct sk_buff *skb = __skb_dequeue(&sch->q);
+ if (skb)
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+
prefetch(&skb->end); /* we'll need skb_shinfo() */
return skb;
}
+static void drop_func(struct sk_buff *skb, void *ctx)
+{
+ struct Qdisc *sch = ctx;
+
+ qdisc_drop(skb, sch);
+}
+
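
drop_func() here is just glue that maps the opaque ctx back to the qdisc. For the hypothetical my_txq consumer the equivalent callback is even thinner, because codel_dequeue() already accounts dropped bytes and packets in struct codel_stats around the call:

	static void my_drop(struct sk_buff *skb, void *ctx)
	{
		/* drop statistics live in struct codel_stats, so a
		 * non-qdisc user only has to free the packet here
		 */
		dev_kfree_skb_any(skb);
	}
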
static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
{
struct codel_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb;
- skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
+ skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,
+ &q->stats, qdisc_pkt_len, codel_get_enqueue_time,
+ drop_func, dequeue_func);
/* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
* or HTB crashes. Defer it for next round.
sch->limit = DEFAULT_CODEL_LIMIT;
- codel_params_init(&q->params, sch);
+ codel_params_init(&q->params);
codel_vars_init(&q->vars);
codel_stats_init(&q->stats);
+ q->params.mtu = psched_mtu(qdisc_dev(sch));
if (opt) {
int err = codel_change(sch, opt);
- * to dequeue a packet from queue. Note: backlog is handled in
- * codel, we dont need to reduce it here.
+ * to dequeue a packet from queue. Note: backlog is now reduced in
+ * this callback, since generic codel no longer touches qdisc state.
*/
-static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
+static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
+ struct Qdisc *sch = ctx;
struct fq_codel_sched_data *q = qdisc_priv(sch);
struct fq_codel_flow *flow;
struct sk_buff *skb = NULL;
skb = dequeue_head(flow);
q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
sch->q.qlen--;
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
}
return skb;
}
+static void drop_func(struct sk_buff *skb, void *ctx)
+{
+ struct Qdisc *sch = ctx;
+
+ qdisc_drop(skb, sch);
+}
+
static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
struct fq_codel_sched_data *q = qdisc_priv(sch);
prev_ecn_mark = q->cstats.ecn_mark;
prev_backlog = sch->qstats.backlog;
- skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
- dequeue);
+ skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
+ &flow->cvars, &q->cstats, qdisc_pkt_len,
+ codel_get_enqueue_time, drop_func, dequeue_func);
flow->dropped += q->cstats.drop_count - prev_drop_count;
flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;
q->perturbation = prandom_u32();
INIT_LIST_HEAD(&q->new_flows);
INIT_LIST_HEAD(&q->old_flows);
- codel_params_init(&q->cparams, sch);
+ codel_params_init(&q->cparams);
codel_stats_init(&q->cstats);
q->cparams.ecn = true;
+ q->cparams.mtu = psched_mtu(qdisc_dev(sch));
if (opt) {
int err = fq_codel_change(sch, opt);
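
Tying the hypothetical my_txq example together: a non-qdisc consumer wires its callbacks into codel_dequeue() the same way the two qdiscs above do, and, like sch_codel/fq_codel, it now has to fill in params->mtu itself after codel_params_init(). The mtu value below is an assumed caller-supplied argument, not something the patch defines.

	static void my_txq_init(struct my_txq *txq, u32 mtu)
	{
		__skb_queue_head_init(&txq->queue);
		txq->backlog = 0;
		codel_params_init(&txq->cparams);
		codel_vars_init(&txq->cvars);
		codel_stats_init(&txq->cstats);
		/* codel_params_init() no longer derives this from a Qdisc */
		txq->cparams.mtu = mtu;
	}

	static struct sk_buff *my_txq_dequeue(struct my_txq *txq)
	{
		return codel_dequeue(txq, &txq->backlog, &txq->cparams,
				     &txq->cvars, &txq->cstats,
				     my_skb_len, my_skb_time,
				     my_drop, my_dequeue);
	}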