[GitHub/exynos8895/android_kernel_samsung_universal8895.git] / net / sched / sch_prio.c
(git blame export — commit-hash and author annotations are interleaved with the code below;
the previously shown "usb: gadget: f_mtp" commit title belonged to a different change, not this file)
1/*
2 * net/sched/sch_prio.c Simple 3-band priority "scheduler".
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10297b99 10 * Fixes: 19990609: J Hadi Salim <hadi@nortelnetworks.com>:
1da177e4
LT
11 * Init -- EINVAL when opt undefined
12 */
13
1da177e4 14#include <linux/module.h>
5a0e3ad6 15#include <linux/slab.h>
1da177e4
LT
16#include <linux/types.h>
17#include <linux/kernel.h>
1da177e4 18#include <linux/string.h>
1da177e4 19#include <linux/errno.h>
1da177e4 20#include <linux/skbuff.h>
1cac41cb 21#include <linux/netdevice.h>
dc5fc579 22#include <net/netlink.h>
1da177e4
LT
23#include <net/pkt_sched.h>
24
25
cc7ec456 26struct prio_sched_data {
1da177e4 27 int bands;
25d8c0d5 28 struct tcf_proto __rcu *filter_list;
1da177e4
LT
29 u8 prio2band[TC_PRIO_MAX+1];
30 struct Qdisc *queues[TCQ_PRIO_BANDS];
1cac41cb 31 u8 enable_flow;
1da177e4
LT
32};
33
34
35static struct Qdisc *
36prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
37{
38 struct prio_sched_data *q = qdisc_priv(sch);
39 u32 band = skb->priority;
40 struct tcf_result res;
25d8c0d5 41 struct tcf_proto *fl;
bdba91ec 42 int err;
1da177e4 43
c27f339a 44 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
1da177e4 45 if (TC_H_MAJ(skb->priority) != sch->handle) {
25d8c0d5 46 fl = rcu_dereference_bh(q->filter_list);
3b3ae880 47 err = tc_classify(skb, fl, &res, false);
1da177e4 48#ifdef CONFIG_NET_CLS_ACT
dbaaa07a 49 switch (err) {
1da177e4
LT
50 case TC_ACT_STOLEN:
51 case TC_ACT_QUEUED:
378a2f09 52 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
1da177e4
LT
53 case TC_ACT_SHOT:
54 return NULL;
3ff50b79 55 }
1da177e4 56#endif
25d8c0d5 57 if (!fl || err < 0) {
1da177e4
LT
58 if (TC_H_MAJ(band))
59 band = 0;
cc7ec456 60 return q->queues[q->prio2band[band & TC_PRIO_MAX]];
1da177e4
LT
61 }
62 band = res.classid;
63 }
64 band = TC_H_MIN(band) - 1;
3e5c2d3b 65 if (band >= q->bands)
1d8ae3fd
DM
66 return q->queues[q->prio2band[0]];
67
1da177e4
LT
68 return q->queues[band];
69}
70
71static int
72prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
73{
74 struct Qdisc *qdisc;
75 int ret;
76
77 qdisc = prio_classify(skb, sch, &ret);
78#ifdef CONFIG_NET_CLS_ACT
79 if (qdisc == NULL) {
29f1df6c 80
c27f339a 81 if (ret & __NET_XMIT_BYPASS)
25331d6c 82 qdisc_qstats_drop(sch);
1da177e4
LT
83 kfree_skb(skb);
84 return ret;
85 }
86#endif
87
5f86173b
JK
88 ret = qdisc_enqueue(skb, qdisc);
89 if (ret == NET_XMIT_SUCCESS) {
1da177e4
LT
90 sch->q.qlen++;
91 return NET_XMIT_SUCCESS;
92 }
378a2f09 93 if (net_xmit_drop_count(ret))
25331d6c 94 qdisc_qstats_drop(sch);
10297b99 95 return ret;
1da177e4
LT
96}
97
48a8f519
PM
98static struct sk_buff *prio_peek(struct Qdisc *sch)
99{
100 struct prio_sched_data *q = qdisc_priv(sch);
101 int prio;
1cac41cb
MB
102 if (!q->enable_flow)
103 return NULL;
48a8f519
PM
104 for (prio = 0; prio < q->bands; prio++) {
105 struct Qdisc *qdisc = q->queues[prio];
106 struct sk_buff *skb = qdisc->ops->peek(qdisc);
107 if (skb)
108 return skb;
109 }
110 return NULL;
111}
1da177e4 112
cc7ec456 113static struct sk_buff *prio_dequeue(struct Qdisc *sch)
1da177e4 114{
1da177e4
LT
115 struct prio_sched_data *q = qdisc_priv(sch);
116 int prio;
1cac41cb
MB
117 if (!q->enable_flow)
118 return NULL;
1da177e4
LT
119
120 for (prio = 0; prio < q->bands; prio++) {
1d8ae3fd 121 struct Qdisc *qdisc = q->queues[prio];
3557619f 122 struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
1d8ae3fd 123 if (skb) {
9190b3b3 124 qdisc_bstats_update(sch, skb);
1d8ae3fd
DM
125 sch->q.qlen--;
126 return skb;
1da177e4
LT
127 }
128 }
129 return NULL;
130
131}
132
cc7ec456 133static unsigned int prio_drop(struct Qdisc *sch)
1da177e4
LT
134{
135 struct prio_sched_data *q = qdisc_priv(sch);
136 int prio;
137 unsigned int len;
138 struct Qdisc *qdisc;
139
140 for (prio = q->bands-1; prio >= 0; prio--) {
141 qdisc = q->queues[prio];
6d037a26 142 if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) {
1da177e4
LT
143 sch->q.qlen--;
144 return len;
145 }
146 }
147 return 0;
148}
149
150
151static void
cc7ec456 152prio_reset(struct Qdisc *sch)
1da177e4
LT
153{
154 int prio;
155 struct prio_sched_data *q = qdisc_priv(sch);
156
cc7ec456 157 for (prio = 0; prio < q->bands; prio++)
1da177e4
LT
158 qdisc_reset(q->queues[prio]);
159 sch->q.qlen = 0;
1cac41cb 160 q->enable_flow = 1;
1da177e4
LT
161}
162
163static void
cc7ec456 164prio_destroy(struct Qdisc *sch)
1da177e4
LT
165{
166 int prio;
167 struct prio_sched_data *q = qdisc_priv(sch);
1da177e4 168
ff31ab56 169 tcf_destroy_chain(&q->filter_list);
cc7ec456 170 for (prio = 0; prio < q->bands; prio++)
1da177e4
LT
171 qdisc_destroy(q->queues[prio]);
172}
173
1e90474c 174static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
1da177e4
LT
175{
176 struct prio_sched_data *q = qdisc_priv(sch);
d62733c8 177 struct tc_prio_qopt *qopt;
1da177e4 178 int i;
1cac41cb 179 int flow_change = 0;
1da177e4 180
1d8ae3fd
DM
181 if (nla_len(opt) < sizeof(*qopt))
182 return -EINVAL;
183 qopt = nla_data(opt);
d62733c8 184
1d8ae3fd 185 if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
1da177e4
LT
186 return -EINVAL;
187
cc7ec456 188 for (i = 0; i <= TC_PRIO_MAX; i++) {
1d8ae3fd 189 if (qopt->priomap[i] >= qopt->bands)
1da177e4
LT
190 return -EINVAL;
191 }
192
193 sch_tree_lock(sch);
1cac41cb
MB
194 if (q->enable_flow != qopt->enable_flow) {
195 q->enable_flow = qopt->enable_flow;
196 flow_change = 1;
197 }
1d8ae3fd 198 q->bands = qopt->bands;
1da177e4
LT
199 memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
200
cc7ec456 201 for (i = q->bands; i < TCQ_PRIO_BANDS; i++) {
b94c8afc
PM
202 struct Qdisc *child = q->queues[i];
203 q->queues[i] = &noop_qdisc;
5e50da01 204 if (child != &noop_qdisc) {
ca375cf3 205 qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
1da177e4 206 qdisc_destroy(child);
5e50da01 207 }
1da177e4
LT
208 }
209 sch_tree_unlock(sch);
210
cc7ec456 211 for (i = 0; i < q->bands; i++) {
dd914b40 212 if (q->queues[i] == &noop_qdisc) {
b94c8afc 213 struct Qdisc *child, *old;
cc7ec456 214
3511c913 215 child = qdisc_create_dflt(sch->dev_queue,
bb949fbd 216 &pfifo_qdisc_ops,
9f9afec4 217 TC_H_MAKE(sch->handle, i + 1));
1da177e4
LT
218 if (child) {
219 sch_tree_lock(sch);
b94c8afc
PM
220 old = q->queues[i];
221 q->queues[i] = child;
1da177e4 222
b94c8afc 223 if (old != &noop_qdisc) {
ca375cf3
WC
224 qdisc_tree_reduce_backlog(old,
225 old->q.qlen,
226 old->qstats.backlog);
b94c8afc 227 qdisc_destroy(old);
5e50da01 228 }
1da177e4
LT
229 sch_tree_unlock(sch);
230 }
231 }
232 }
1cac41cb
MB
233 /* Schedule qdisc when flow re-enabled */
234 if (flow_change && q->enable_flow) {
235 if (!test_bit(__QDISC_STATE_DEACTIVATED,
236 &sch->state))
237 __netif_schedule(qdisc_root(sch));
238 }
1da177e4
LT
239 return 0;
240}
241
1e90474c 242static int prio_init(struct Qdisc *sch, struct nlattr *opt)
1da177e4
LT
243{
244 struct prio_sched_data *q = qdisc_priv(sch);
245 int i;
246
cc7ec456 247 for (i = 0; i < TCQ_PRIO_BANDS; i++)
1da177e4
LT
248 q->queues[i] = &noop_qdisc;
249
250 if (opt == NULL) {
251 return -EINVAL;
252 } else {
253 int err;
254
cc7ec456 255 if ((err = prio_tune(sch, opt)) != 0)
1da177e4
LT
256 return err;
257 }
258 return 0;
259}
260
261static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
262{
263 struct prio_sched_data *q = qdisc_priv(sch);
27a884dc 264 unsigned char *b = skb_tail_pointer(skb);
1da177e4
LT
265 struct tc_prio_qopt opt;
266
267 opt.bands = q->bands;
1cac41cb 268 opt.enable_flow = q->enable_flow;
cc7ec456 269 memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);
d62733c8 270
1b34ec43
DM
271 if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
272 goto nla_put_failure;
d62733c8 273
1da177e4
LT
274 return skb->len;
275
1e90474c 276nla_put_failure:
dc5fc579 277 nlmsg_trim(skb, b);
1da177e4
LT
278 return -1;
279}
280
281static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
282 struct Qdisc **old)
283{
284 struct prio_sched_data *q = qdisc_priv(sch);
285 unsigned long band = arg - 1;
286
1da177e4
LT
287 if (new == NULL)
288 new = &noop_qdisc;
289
1188e140 290 *old = qdisc_replace(sch, new, &q->queues[band]);
1da177e4
LT
291 return 0;
292}
293
294static struct Qdisc *
295prio_leaf(struct Qdisc *sch, unsigned long arg)
296{
297 struct prio_sched_data *q = qdisc_priv(sch);
298 unsigned long band = arg - 1;
299
1da177e4
LT
300 return q->queues[band];
301}
302
303static unsigned long prio_get(struct Qdisc *sch, u32 classid)
304{
305 struct prio_sched_data *q = qdisc_priv(sch);
306 unsigned long band = TC_H_MIN(classid);
307
308 if (band - 1 >= q->bands)
309 return 0;
310 return band;
311}
312
313static unsigned long prio_bind(struct Qdisc *sch, unsigned long parent, u32 classid)
314{
315 return prio_get(sch, classid);
316}
317
318
/* Classes are not reference counted here; releasing one is a no-op. */
static void prio_put(struct Qdisc *q, unsigned long cl)
{
}
322
1da177e4
LT
323static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
324 struct tcmsg *tcm)
325{
326 struct prio_sched_data *q = qdisc_priv(sch);
327
1da177e4 328 tcm->tcm_handle |= TC_H_MIN(cl);
5b9a9ccf 329 tcm->tcm_info = q->queues[cl-1]->handle;
1da177e4
LT
330 return 0;
331}
332
2cf6c36c
JP
333static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
334 struct gnet_dump *d)
335{
336 struct prio_sched_data *q = qdisc_priv(sch);
337 struct Qdisc *cl_q;
338
339 cl_q = q->queues[cl - 1];
22e0f8b9 340 if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 ||
b0ab6f92 341 gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
2cf6c36c
JP
342 return -1;
343
344 return 0;
345}
346
1da177e4
LT
347static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
348{
349 struct prio_sched_data *q = qdisc_priv(sch);
350 int prio;
351
352 if (arg->stop)
353 return;
354
355 for (prio = 0; prio < q->bands; prio++) {
356 if (arg->count < arg->skip) {
357 arg->count++;
358 continue;
359 }
cc7ec456 360 if (arg->fn(sch, prio + 1, arg) < 0) {
1da177e4
LT
361 arg->stop = 1;
362 break;
363 }
364 arg->count++;
365 }
366}
367
25d8c0d5
JF
368static struct tcf_proto __rcu **prio_find_tcf(struct Qdisc *sch,
369 unsigned long cl)
1da177e4
LT
370{
371 struct prio_sched_data *q = qdisc_priv(sch);
372
373 if (cl)
374 return NULL;
375 return &q->filter_list;
376}
377
20fea08b 378static const struct Qdisc_class_ops prio_class_ops = {
1da177e4
LT
379 .graft = prio_graft,
380 .leaf = prio_leaf,
381 .get = prio_get,
382 .put = prio_put,
1da177e4
LT
383 .walk = prio_walk,
384 .tcf_chain = prio_find_tcf,
385 .bind_tcf = prio_bind,
386 .unbind_tcf = prio_put,
387 .dump = prio_dump_class,
2cf6c36c 388 .dump_stats = prio_dump_class_stats,
1da177e4
LT
389};
390
20fea08b 391static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
1da177e4
LT
392 .next = NULL,
393 .cl_ops = &prio_class_ops,
394 .id = "prio",
395 .priv_size = sizeof(struct prio_sched_data),
396 .enqueue = prio_enqueue,
397 .dequeue = prio_dequeue,
48a8f519 398 .peek = prio_peek,
1da177e4
LT
399 .drop = prio_drop,
400 .init = prio_init,
401 .reset = prio_reset,
402 .destroy = prio_destroy,
403 .change = prio_tune,
404 .dump = prio_dump,
405 .owner = THIS_MODULE,
406};
407
408static int __init prio_module_init(void)
409{
1d8ae3fd 410 return register_qdisc(&prio_qdisc_ops);
1da177e4
LT
411}
412
10297b99 413static void __exit prio_module_exit(void)
1da177e4
LT
414{
415 unregister_qdisc(&prio_qdisc_ops);
416}
417
/* Module plumbing and license declaration. */
module_init(prio_module_init)
module_exit(prio_module_exit)

MODULE_LICENSE("GPL");