/*
 * net/sched/sch_mqprio.c
 *
 * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

struct mqprio_sched {
	struct Qdisc **qdiscs;
	int hw_owned;
};

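/* Tear down the per-queue child qdiscs and free the priv array, then
 * hand the traffic class configuration back: let the driver clear its
 * hardware state via ndo_setup_tc() when it owns the mapping,
 * otherwise just zero the device's software tc count.
 */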
static void mqprio_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (priv->qdiscs) {
		for (ntx = 0;
		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
		     ntx++)
			qdisc_destroy(priv->qdiscs[ntx]);
		kfree(priv->qdiscs);
	}

	if (priv->hw_owned && dev->netdev_ops->ndo_setup_tc)
		dev->netdev_ops->ndo_setup_tc(dev, 0);
	else
		netdev_set_num_tc(dev, 0);
}

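/* Validate the user-supplied tc_mqprio_qopt: the number of traffic
 * classes, the priority-to-tc map, and, for software mappings, that
 * each tc's queue range is non-empty, fits within real_num_tx_queues,
 * and does not overlap the ranges of the following tcs.
 */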
static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
{
	int i, j;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i < TC_BITMASK + 1; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc)
			return -EINVAL;
	}

	/* net_device does not support requested operation */
	if (qopt->hw && !dev->netdev_ops->ndo_setup_tc)
		return -EINVAL;

	/* If hw owned, qcount and qoffset are taken from the LLD, so
	 * there is no reason to verify them here.
	 */
	if (qopt->hw)
		return 0;

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is in tx range; a count equal
		 * to real_num_tx_queues indicates the last queue is in
		 * use.
		 */
		if (qopt->offset[i] >= dev->real_num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues)
			return -EINVAL;

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j])
				return -EINVAL;
		}
	}

	return 0;
}

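/* Set up the root mqprio qdisc: validate the options, pre-allocate one
 * child qdisc per hardware tx queue, and program the queue mapping
 * either into the driver (ndo_setup_tc) or into the net_device's
 * software tc tables.  The priority-to-tc map always comes from the
 * supplied options.
 */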
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	int i, err = -EOPNOTSUPP;
	struct tc_mqprio_qopt *qopt = NULL;

	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);
	if (mqprio_parse_opt(dev, qopt))
		return -EINVAL;

	/* pre-allocate qdisc, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (i = 0; i < dev->num_tx_queues; i++) {
		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)));
		if (!qdisc)
			return -ENOMEM;

		priv->qdiscs[i] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	/* If the mqprio options indicate that hardware should own the
	 * queue mapping, run ndo_setup_tc; otherwise use the supplied
	 * and verified mapping.
	 */
	if (qopt->hw) {
		priv->hw_owned = 1;
		err = dev->netdev_ops->ndo_setup_tc(dev, qopt->num_tc);
		if (err)
			return err;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}

	/* Always use supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

	sch->flags |= TCQ_F_MQROOT;
	return 0;
}

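/* Attach the pre-allocated child qdiscs to their tx queues, destroying
 * whatever was grafted there before.  Only children backing queues
 * currently in use (below real_num_tx_queues) are added to the qdisc
 * list; the priv array is freed since the queues now hold the
 * references.
 */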
static void mqprio_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	/* Attach underlying qdisc */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_destroy(old);
		if (ntx < dev->real_num_tx_queues)
			qdisc_list_add(qdisc);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

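/* mqprio exposes two bands of class ids: 1..num_tc are the traffic
 * classes themselves, and num_tc+1..num_tc+num_tx_queues address the
 * individual tx queues.  Map a queue-band class id back to its
 * netdev_queue, or return NULL if it is out of range (which includes
 * ids in the traffic-class band, via unsigned underflow).
 */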
static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1 - netdev_get_num_tc(dev);

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

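/* Replace the child qdisc of a queue-band class.  The device is
 * deactivated around the graft so the old qdisc can be swapped out
 * safely while traffic is stopped.
 */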
static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
			struct Qdisc **old)
{
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

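/* Dump the qdisc's configuration and aggregate statistics.  The root
 * keeps no stats of its own, so they are recomputed here by summing
 * the per-queue children under their qdisc locks before the
 * tc_mqprio_qopt is emitted.
 */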
static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_mqprio_qopt opt = { 0 };
	struct Qdisc *qdisc;
	unsigned int i;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	for (i = 0; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		spin_lock_bh(qdisc_lock(qdisc));
		sch->q.qlen += qdisc->q.qlen;
		sch->bstats.bytes += qdisc->bstats.bytes;
		sch->bstats.packets += qdisc->bstats.packets;
		sch->qstats.backlog += qdisc->qstats.backlog;
		sch->qstats.drops += qdisc->qstats.drops;
		sch->qstats.requeues += qdisc->qstats.requeues;
		sch->qstats.overlimits += qdisc->qstats.overlimits;
		spin_unlock_bh(qdisc_lock(qdisc));
	}

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
	opt.hw = priv->hw_owned;

	for (i = 0; i < netdev_get_num_tc(dev); i++) {
		opt.count[i] = dev->tc_to_txq[i].count;
		opt.offset[i] = dev->tc_to_txq[i].offset;
	}

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

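/* Return the leaf qdisc for a queue-band class.  Traffic-class band
 * ids have no single leaf, so mqprio_queue_get() yields NULL for them.
 */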
static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}

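/* Validate a class id from user space: ids run from 1 to
 * num_tc + num_tx_queues, covering both the traffic-class band and
 * the per-queue band.  Classes need no refcounting here, so get
 * simply returns the id and put is a no-op.
 */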
static unsigned long mqprio_get(struct Qdisc *sch, u32 classid)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = TC_H_MIN(classid);

	if (ntx > dev->num_tx_queues + netdev_get_num_tc(dev))
		return 0;
	return ntx;
}

static void mqprio_put(struct Qdisc *sch, unsigned long cl)
{
}

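/* Dump one class.  Traffic-class band classes report TC_H_ROOT as
 * their parent; queue-band classes report the traffic class whose
 * queue range contains them as parent, plus the handle of the qdisc
 * attached to their queue.
 */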
static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct net_device *dev = qdisc_dev(sch);

	if (cl <= netdev_get_num_tc(dev)) {
		tcm->tcm_parent = TC_H_ROOT;
		tcm->tcm_info = 0;
	} else {
		int i;
		struct netdev_queue *dev_queue;

		dev_queue = mqprio_queue_get(sch, cl);
		tcm->tcm_parent = 0;
		for (i = 0; i < netdev_get_num_tc(dev); i++) {
			struct netdev_tc_txq tc = dev->tc_to_txq[i];
			int q_idx = cl - netdev_get_num_tc(dev);

			if (q_idx > tc.offset &&
			    q_idx <= tc.offset + tc.count) {
				tcm->tcm_parent =
					TC_H_MAKE(TC_H_MAJ(sch->handle),
						  TC_H_MIN(i + 1));
				break;
			}
		}
		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	}
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

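/* Dump statistics for one class.  For a traffic-class band class the
 * stats are accumulated over every queue in that tc's range; for a
 * queue-band class the attached qdisc's own counters are copied out
 * directly.
 */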
static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	struct net_device *dev = qdisc_dev(sch);

	if (cl <= netdev_get_num_tc(dev)) {
		int i;
		__u32 qlen = 0;
		struct Qdisc *qdisc;
		struct gnet_stats_queue qstats = {0};
		struct gnet_stats_basic_packed bstats = {0};
		struct netdev_tc_txq tc = dev->tc_to_txq[cl - 1];

		/* Drop the lock here; it will be reclaimed before
		 * touching statistics.  This is required because the
		 * d->lock we hold here is the lock on
		 * dev_queue->qdisc_sleeping, which is also acquired
		 * below.
		 */
		spin_unlock_bh(d->lock);

		for (i = tc.offset; i < tc.offset + tc.count; i++) {
			struct netdev_queue *q = netdev_get_tx_queue(dev, i);

			qdisc = rtnl_dereference(q->qdisc);
			spin_lock_bh(qdisc_lock(qdisc));
			qlen += qdisc->q.qlen;
			bstats.bytes += qdisc->bstats.bytes;
			bstats.packets += qdisc->bstats.packets;
			qstats.backlog += qdisc->qstats.backlog;
			qstats.drops += qdisc->qstats.drops;
			qstats.requeues += qdisc->qstats.requeues;
			qstats.overlimits += qdisc->qstats.overlimits;
			spin_unlock_bh(qdisc_lock(qdisc));
		}
		/* Reclaim root sleeping lock before completing stats */
		spin_lock_bh(d->lock);
		if (gnet_stats_copy_basic(d, NULL, &bstats) < 0 ||
		    gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
			return -1;
	} else {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

		sch = dev_queue->qdisc_sleeping;
		if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 ||
		    gnet_stats_copy_queue(d, NULL,
					  &sch->qstats, sch->q.qlen) < 0)
			return -1;
	}
	return 0;
}

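/* Iterate over all classes, traffic-class band first and then the
 * per-queue band, invoking the walker callback on each until it asks
 * to stop.
 */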
static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	/* Walk hierarchy with a virtual class per tc */
	arg->count = arg->skip;
	for (ntx = arg->skip;
	     ntx < dev->num_tx_queues + netdev_get_num_tc(dev);
	     ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops mqprio_class_ops = {
	.graft = mqprio_graft,
	.leaf = mqprio_leaf,
	.get = mqprio_get,
	.put = mqprio_put,
	.walk = mqprio_walk,
	.dump = mqprio_dump_class,
	.dump_stats = mqprio_dump_class_stats,
};

static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
	.cl_ops = &mqprio_class_ops,
	.id = "mqprio",
	.priv_size = sizeof(struct mqprio_sched),
	.init = mqprio_init,
	.destroy = mqprio_destroy,
	.attach = mqprio_attach,
	.dump = mqprio_dump,
	.owner = THIS_MODULE,
};

static int __init mqprio_module_init(void)
{
	return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
	unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");