/*
 * net/sched/sch_prio.c	Simple 3-band priority "scheduler".
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 * Fixes:	19990609: J Hadi Salim <hadi@nortelnetworks.com>:
 *		Init --  EINVAL when opt undefined
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

struct prio_sched_data {
	int bands;
	struct tcf_proto __rcu *filter_list;
	u8 prio2band[TC_PRIO_MAX+1];
	struct Qdisc *queues[TCQ_PRIO_BANDS];
	u8 enable_flow;
};

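/*
 * prio_classify - pick the band (child qdisc) for an skb.
 *
 * If skb->priority already carries this qdisc's major handle, its minor
 * number selects the band directly.  Otherwise the attached filter chain
 * is run; when no filter matches (or none is attached), the priority is
 * mapped through prio2band.  A band index outside [0, bands) falls back
 * to the queue named by prio2band[0].
 */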
static struct Qdisc *
prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	u32 band = skb->priority;
	struct tcf_result res;
	struct tcf_proto *fl;
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	if (TC_H_MAJ(skb->priority) != sch->handle) {
		fl = rcu_dereference_bh(q->filter_list);
		err = tc_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
		switch (err) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if (!fl || err < 0) {
			if (TC_H_MAJ(band))
				band = 0;
			return q->queues[q->prio2band[band & TC_PRIO_MAX]];
		}
		band = res.classid;
	}
	band = TC_H_MIN(band) - 1;
	if (band >= q->bands)
		return q->queues[q->prio2band[0]];

	return q->queues[band];
}

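/* Hand the skb to the band chosen by prio_classify(); failures that
 * count as drops (see net_xmit_drop_count()) are reflected in this
 * qdisc's drop statistics.
 */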
static int
prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct Qdisc *qdisc;
	int ret;

	qdisc = prio_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		kfree_skb(skb);
		return ret;
	}
#endif

	ret = qdisc_enqueue(skb, qdisc);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	if (net_xmit_drop_count(ret))
		qdisc_qstats_drop(sch);
	return ret;
}

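/* Peek/dequeue scan the bands in strict priority order: band 0 is
 * always served first.  Note that enable_flow is not part of mainline
 * sch_prio; it appears to be a vendor extension that, when cleared,
 * makes both operations return NULL so queued packets are held back
 * until flow is re-enabled through prio_tune().
 */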
static struct sk_buff *prio_peek(struct Qdisc *sch)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	if (!q->enable_flow)
		return NULL;

	for (prio = 0; prio < q->bands; prio++) {
		struct Qdisc *qdisc = q->queues[prio];
		struct sk_buff *skb = qdisc->ops->peek(qdisc);
		if (skb)
			return skb;
	}
	return NULL;
}

static struct sk_buff *prio_dequeue(struct Qdisc *sch)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	if (!q->enable_flow)
		return NULL;

	for (prio = 0; prio < q->bands; prio++) {
		struct Qdisc *qdisc = q->queues[prio];
		struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
		if (skb) {
			qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
			return skb;
		}
	}
	return NULL;
}

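/* Reclaim one packet, starting with the lowest-priority
 * (highest-numbered) band so high-priority traffic survives longest.
 */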
static unsigned int prio_drop(struct Qdisc *sch)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;
	unsigned int len;
	struct Qdisc *qdisc;

	for (prio = q->bands-1; prio >= 0; prio--) {
		qdisc = q->queues[prio];
		if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) {
			sch->q.qlen--;
			return len;
		}
	}
	return 0;
}

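/* prio_reset also re-enables flow, so a freshly reset qdisc never
 * starts out blocked by the enable_flow extension.
 */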
static void
prio_reset(struct Qdisc *sch)
{
	int prio;
	struct prio_sched_data *q = qdisc_priv(sch);

	for (prio = 0; prio < q->bands; prio++)
		qdisc_reset(q->queues[prio]);
	sch->q.qlen = 0;
	q->enable_flow = 1;
}

static void
prio_destroy(struct Qdisc *sch)
{
	int prio;
	struct prio_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	for (prio = 0; prio < q->bands; prio++)
		qdisc_destroy(q->queues[prio]);
}

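/*
 * prio_tune - apply a tc_prio_qopt from userspace (both init and change).
 *
 * Bands being removed are swapped for noop_qdisc and destroyed after
 * their backlog is subtracted from the hierarchy; newly exposed bands
 * get a default pfifo child.  Child creation happens outside the tree
 * lock because qdisc_create_dflt() may sleep.
 */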
static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct tc_prio_qopt *qopt;
	int i;
	int flow_change = 0;

	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
		return -EINVAL;

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (qopt->priomap[i] >= qopt->bands)
			return -EINVAL;
	}

	sch_tree_lock(sch);
	if (q->enable_flow != qopt->enable_flow) {
		q->enable_flow = qopt->enable_flow;
		flow_change = 1;
	}
	q->bands = qopt->bands;
	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);

	for (i = q->bands; i < TCQ_PRIO_BANDS; i++) {
		struct Qdisc *child = q->queues[i];

		q->queues[i] = &noop_qdisc;
		if (child != &noop_qdisc) {
			qdisc_tree_reduce_backlog(child, child->q.qlen,
						  child->qstats.backlog);
			qdisc_destroy(child);
		}
	}
	sch_tree_unlock(sch);

	for (i = 0; i < q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child, *old;

			child = qdisc_create_dflt(sch->dev_queue,
						  &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle, i + 1));
			if (child) {
				sch_tree_lock(sch);
				old = q->queues[i];
				q->queues[i] = child;

				if (old != &noop_qdisc) {
					qdisc_tree_reduce_backlog(old,
								  old->q.qlen,
								  old->qstats.backlog);
					qdisc_destroy(old);
				}
				sch_tree_unlock(sch);
			}
		}
	}

	/* Schedule the qdisc when flow is re-enabled */
	if (flow_change && q->enable_flow) {
		if (!test_bit(__QDISC_STATE_DEACTIVATED, &sch->state))
			__netif_schedule(qdisc_root(sch));
	}
	return 0;
}

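/* Options are mandatory for prio ("Init -- EINVAL when opt undefined",
 * per the fix noted in the file header), so a NULL opt is rejected
 * rather than defaulted.
 */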
static int prio_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int i;

	for (i = 0; i < TCQ_PRIO_BANDS; i++)
		q->queues[i] = &noop_qdisc;

	if (opt == NULL) {
		return -EINVAL;
	} else {
		int err;

		if ((err = prio_tune(sch, opt)) != 0)
			return err;
	}
	return 0;
}

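/* Fill a tc_prio_qopt into TCA_OPTIONS; if the skb runs out of room,
 * trim back to the saved tail pointer and report failure.
 */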
static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_prio_qopt opt;

	opt.bands = q->bands;
	opt.enable_flow = q->enable_flow;
	memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

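/* Class operations: each band is a class whose handle minor is
 * band + 1.  Grafting a NULL qdisc parks the band on noop_qdisc.
 */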
static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		      struct Qdisc **old)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->queues[band]);
	return 0;
}

static struct Qdisc *
prio_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	return q->queues[band];
}

static unsigned long prio_get(struct Qdisc *sch, u32 classid)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	if (band - 1 >= q->bands)
		return 0;
	return band;
}

static unsigned long prio_bind(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	return prio_get(sch, classid);
}

static void prio_put(struct Qdisc *q, unsigned long cl)
{
}

static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
			   struct tcmsg *tcm)
{
	struct prio_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = q->queues[cl-1]->handle;
	return 0;
}

static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				 struct gnet_dump *d)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
		return -1;

	return 0;
}

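/* Walk the bands for class dumps, honouring the walker's skip/stop
 * bookkeeping.
 */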
static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	if (arg->stop)
		return;

	for (prio = 0; prio < q->bands; prio++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, prio + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct tcf_proto __rcu **prio_find_tcf(struct Qdisc *sch,
					      unsigned long cl)
{
	struct prio_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static const struct Qdisc_class_ops prio_class_ops = {
	.graft		=	prio_graft,
	.leaf		=	prio_leaf,
	.get		=	prio_get,
	.put		=	prio_put,
	.walk		=	prio_walk,
	.tcf_chain	=	prio_find_tcf,
	.bind_tcf	=	prio_bind,
	.unbind_tcf	=	prio_put,
	.dump		=	prio_dump_class,
	.dump_stats	=	prio_dump_class_stats,
};

static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&prio_class_ops,
	.id		=	"prio",
	.priv_size	=	sizeof(struct prio_sched_data),
	.enqueue	=	prio_enqueue,
	.dequeue	=	prio_dequeue,
	.peek		=	prio_peek,
	.drop		=	prio_drop,
	.init		=	prio_init,
	.reset		=	prio_reset,
	.destroy	=	prio_destroy,
	.change		=	prio_tune,
	.dump		=	prio_dump,
	.owner		=	THIS_MODULE,
};

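/*
 * Example configuration (a sketch; "eth0" and the filter details are
 * illustrative).  bands and priomap are the standard tc parameters for
 * this qdisc; the enable_flow field above is only reachable from a
 * matching patched iproute2:
 *
 *   # 3 bands; priomap maps each TOS-derived priority to a band
 *   tc qdisc add dev eth0 root handle 1: prio bands 3 \
 *           priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1
 *   # steer ssh traffic into the highest-priority band (1:1)
 *   tc filter add dev eth0 parent 1: protocol ip u32 \
 *           match ip dport 22 0xffff flowid 1:1
 */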
static int __init prio_module_init(void)
{
	return register_qdisc(&prio_qdisc_ops);
}

static void __exit prio_module_exit(void)
{
	unregister_qdisc(&prio_qdisc_ops);
}

module_init(prio_module_init)
module_exit(prio_module_exit)

MODULE_LICENSE("GPL");