const struct gnet_stats_basic_packed *b,
struct gnet_stats_rate_est64 *r);
int gnet_stats_copy_queue(struct gnet_dump *d,
- struct gnet_stats_queue *q, __u32 qlen);
+ struct gnet_stats_queue __percpu *cpu_q,
+ struct gnet_stats_queue *q, __u32 qlen);
int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
int gnet_stats_finish_copy(struct gnet_dump *d);
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
} __packed;
unsigned int __state;
- struct gnet_stats_queue qstats;
+ union {
+ struct gnet_stats_queue qstats;
+ struct gnet_stats_queue __percpu *cpu_qstats;
+ } __packed;
struct rcu_head rcu_head;
int padded;
atomic_t refcnt;
sch->qstats.drops++;
}
+static inline void qdisc_qstats_drop_cpu(struct Qdisc *sch)
+{
+ struct gnet_stats_queue *qstats = this_cpu_ptr(sch->cpu_qstats);
+
+ qstats->drops++;
+}
+
static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
sch->qstats.overlimits++;
}
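
For orientation, here is a minimal sketch (not part of the patch) of how an enqueue path in a qdisc that sets TCQ_F_CPUSTATS could use the new helper: qdisc_qstats_drop_cpu() charges the drop to this CPU's counters, while qdisc_qstats_drop() keeps the old shared-counter behaviour. The callback name and the limit check are hypothetical, and backlog accounting is omitted.

static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	/* Hypothetical limit check; a real qdisc has its own criteria. */
	if (unlikely(skb_queue_len(&sch->q) >= sch->limit)) {
		if (qdisc_is_percpu_stats(sch))
			qdisc_qstats_drop_cpu(sch);	/* this CPU's drops++ */
		else
			qdisc_qstats_drop(sch);		/* sch->qstats.drops++ */
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	__skb_queue_tail(&sch->q, skb);
	return NET_XMIT_SUCCESS;
}
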
EXPORT_SYMBOL(gnet_stats_copy_rate_est);
+static void
+__gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
+ const struct gnet_stats_queue __percpu *q)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
+
+ qstats->qlen = 0;
+ qstats->backlog += qcpu->backlog;
+ qstats->drops += qcpu->drops;
+ qstats->requeues += qcpu->requeues;
+ qstats->overlimits += qcpu->overlimits;
+ }
+}
+
+static void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
+ const struct gnet_stats_queue __percpu *cpu,
+ const struct gnet_stats_queue *q,
+ __u32 qlen)
+{
+ if (cpu) {
+ __gnet_stats_copy_queue_cpu(qstats, cpu);
+ } else {
+ qstats->qlen = q->qlen;
+ qstats->backlog = q->backlog;
+ qstats->drops = q->drops;
+ qstats->requeues = q->requeues;
+ qstats->overlimits = q->overlimits;
+ }
+
+ qstats->qlen = qlen;
+}
+
/**
* gnet_stats_copy_queue - copy queue statistics into statistics TLV
* @d: dumping handle
+ * @cpu_q: per cpu queue statistics
* @q: queue statistics
* @qlen: queue length statistics
*
* Appends the queue statistics to the top level TLV created by
- * gnet_stats_start_copy().
+ * gnet_stats_start_copy(). Per-CPU queue statistics are used if
+ * they are available.
*
* Returns 0 on success or -1 with the statistic lock released
* if the room in the socket buffer was not sufficient.
*/
int
gnet_stats_copy_queue(struct gnet_dump *d,
+ struct gnet_stats_queue __percpu *cpu_q,
struct gnet_stats_queue *q, __u32 qlen)
{
- q->qlen = qlen;
+ struct gnet_stats_queue qstats = {0};
+
+ __gnet_stats_copy_queue(&qstats, cpu_q, q, qlen);
if (d->compat_tc_stats) {
- d->tc_stats.drops = q->drops;
- d->tc_stats.qlen = q->qlen;
- d->tc_stats.backlog = q->backlog;
- d->tc_stats.overlimits = q->overlimits;
+ d->tc_stats.drops = qstats.drops;
+ d->tc_stats.qlen = qstats.qlen;
+ d->tc_stats.backlog = qstats.backlog;
+ d->tc_stats.overlimits = qstats.overlimits;
}
if (d->tail)
- return gnet_stats_copy(d, TCA_STATS_QUEUE, q, sizeof(*q));
+ return gnet_stats_copy(d, TCA_STATS_QUEUE,
+ &qstats, sizeof(qstats));
return 0;
}
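
And a minimal sketch (again not from the patch) of a dump-side caller of the extended API: when TCQ_F_CPUSTATS is set, the per-cpu pointer is handed to gnet_stats_copy_queue(), which sums backlog, drops, requeues and overlimits over all possible CPUs and takes qlen from the caller; passing NULL keeps the old single-counter behaviour. The callback name is hypothetical.

static int example_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
	__u32 qlen = sch->q.qlen;

	if (qdisc_is_percpu_stats(sch))
		cpu_qstats = sch->cpu_qstats;

	/* cpu_qstats == NULL falls back to the per-qdisc qstats. */
	return gnet_stats_copy_queue(d, cpu_qstats, &sch->qstats, qlen);
}
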
if (gnet_stats_copy_basic(&d, NULL, &p->tcfc_bstats) < 0 ||
gnet_stats_copy_rate_est(&d, &p->tcfc_bstats,
&p->tcfc_rate_est) < 0 ||
- gnet_stats_copy_queue(&d,
+ gnet_stats_copy_queue(&d, NULL,
&p->tcfc_qstats,
p->tcfc_qstats.qlen) < 0)
goto errout;
alloc_percpu(struct gnet_stats_basic_cpu);
if (!sch->cpu_bstats)
goto err_out4;
+
+ sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
+ if (!sch->cpu_qstats)
+ goto err_out4;
}
if (tca[TCA_STAB]) {
err_out4:
free_percpu(sch->cpu_bstats);
+ free_percpu(sch->cpu_qstats);
/*
* Any broken qdiscs that would require a ops->reset() here?
* The qdisc was never in action so it shouldn't be necessary.
u32 portid, u32 seq, u16 flags, int event)
{
struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
+ struct gnet_stats_queue __percpu *cpu_qstats = NULL;
struct tcmsg *tcm;
struct nlmsghdr *nlh;
unsigned char *b = skb_tail_pointer(skb);
if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
goto nla_put_failure;
- if (qdisc_is_percpu_stats(q))
+ if (qdisc_is_percpu_stats(q)) {
cpu_bstats = q->cpu_bstats;
+ cpu_qstats = q->cpu_qstats;
+ }
if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats) < 0 ||
gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
- gnet_stats_copy_queue(&d, &q->qstats, qlen) < 0)
+ gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
goto nla_put_failure;
if (gnet_stats_finish_copy(&d) < 0)
struct atm_flow_data *flow = (struct atm_flow_data *)arg;
if (gnet_stats_copy_basic(d, NULL, &flow->bstats) < 0 ||
- gnet_stats_copy_queue(d, &flow->qstats, flow->q->q.qlen) < 0)
+ gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0)
return -1;
return 0;
if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
- gnet_stats_copy_queue(d, &cl->qstats, cl->q->q.qlen) < 0)
+ gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
return -1;
return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
- gnet_stats_copy_queue(d, &cl->qdisc->qstats, qlen) < 0)
+ gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
return -1;
return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
qs.backlog = q->backlogs[idx];
qs.drops = flow->dropped;
}
- if (gnet_stats_copy_queue(d, &qs, 0) < 0)
+ if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0)
return -1;
if (idx < q->flows_cnt)
return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
- gnet_stats_copy_queue(d, &cl->qstats, cl->qdisc->q.qlen) < 0)
+ gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
return -1;
return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
- gnet_stats_copy_queue(d, &cl->qstats, qlen) < 0)
+ gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
return -1;
return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
sch = dev_queue->qdisc_sleeping;
if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 ||
- gnet_stats_copy_queue(d, &sch->qstats, sch->q.qlen) < 0)
+ gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
return -1;
return 0;
}
/* Reclaim root sleeping lock before completing stats */
spin_lock_bh(d->lock);
if (gnet_stats_copy_basic(d, NULL, &bstats) < 0 ||
- gnet_stats_copy_queue(d, &qstats, qlen) < 0)
+ gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
return -1;
} else {
struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
sch = dev_queue->qdisc_sleeping;
if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 ||
- gnet_stats_copy_queue(d, &sch->qstats, sch->q.qlen) < 0)
+ gnet_stats_copy_queue(d, NULL,
+ &sch->qstats, sch->q.qlen) < 0)
return -1;
}
return 0;
cl_q = q->queues[cl - 1];
if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 ||
- gnet_stats_copy_queue(d, &cl_q->qstats, cl_q->q.qlen) < 0)
+ gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
return -1;
return 0;
cl_q = q->queues[cl - 1];
if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 ||
- gnet_stats_copy_queue(d, &cl_q->qstats, cl_q->q.qlen) < 0)
+ gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
return -1;
return 0;
if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
- gnet_stats_copy_queue(d, &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0)
+ gnet_stats_copy_queue(d, NULL,
+ &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0)
return -1;
return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
qs.qlen = slot->qlen;
qs.backlog = slot->backlog;
}
- if (gnet_stats_copy_queue(d, &qs, qs.qlen) < 0)
+ if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
return -1;
return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}