/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.2"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet. It can delay
	packets and add random jitter (and correlation). The random
	distribution can be loaded from a table as well to provide
	normal, Pareto, or experimental curves. Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines on top of it. Nor does it need to do
	bandwidth control, since that can be handled by a token bucket
	or other rate-control qdisc.

	The emulator is limited by the Linux timer resolution
	and will create packet bursts on the HZ boundary (1ms).
*/
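
/* Illustrative configuration from userspace (the device name "eth0" is
 * hypothetical):
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *	tc qdisc change dev eth0 root netem loss 0.3% 25%
 *
 * The first command delays packets by 100ms +/- 10ms of jitter, each delay
 * 25% correlated with the previous one; the second adds 0.3% packet loss,
 * again 25% correlated with the previous loss decision.
 */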

struct netem_sched_data {
        struct Qdisc *qdisc;
        struct qdisc_watchdog watchdog;

        psched_tdiff_t latency;
        psched_tdiff_t jitter;

        u32 loss;
        u32 limit;
        u32 counter;
        u32 gap;
        u32 duplicate;
        u32 reorder;
        u32 corrupt;

        struct crndstate {
                u32 last;
                u32 rho;
        } delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

        struct disttable {
                u32 size;
                s16 table[0];
        } *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
        psched_time_t time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
        BUILD_BUG_ON(sizeof(skb->cb) <
                sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
        return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
        state->rho = rho;
        state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
        u64 value, rho;
        unsigned long answer;

        if (state->rho == 0)    /* no correlation */
                return net_random();

        value = net_random();
        rho = (u64)state->rho + 1;
        answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
        state->last = answer;
        return answer;
}
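
/* Worked example (illustrative): with state->rho = 1u << 31, i.e. a
 * correlation of ~0.5 scaled to 2^32, the blend above reduces to roughly
 * (value + last) / 2: each output is pulled halfway toward the previous
 * one, yielding a positively correlated sequence.
 */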

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
                                struct crndstate *state,
                                const struct disttable *dist)
{
        psched_tdiff_t x;
        long t;
        u32 rnd;

        if (sigma == 0)
                return mu;

        rnd = get_crandom(state);

        /* default uniform distribution */
        if (dist == NULL)
                return (rnd % (2*sigma)) - sigma + mu;

        t = dist->table[rnd % dist->size];
        x = (sigma % NETEM_DIST_SCALE) * t;
        if (x >= 0)
                x += NETEM_DIST_SCALE/2;
        else
                x -= NETEM_DIST_SCALE/2;

        return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
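
/* The return expression computes mu + t*sigma/NETEM_DIST_SCALE in integer
 * arithmetic, splitting sigma into quotient and remainder so intermediate
 * products stay small. Worked example (illustrative), with
 * NETEM_DIST_SCALE = 8192: sigma = 10000 and a table entry t = 4096
 * ("0.5 standard deviations") give (1808*4096 + 4096)/8192 + 1*4096 =
 * 904 + 4096 = 5000 = sigma/2, as expected.
 */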

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct netem_sched_data *q = qdisc_priv(sch);
        /* We don't fill cb now as skb_unshare() may invalidate it */
        struct netem_skb_cb *cb;
        struct sk_buff *skb2;
        int ret;
        int count = 1;
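        /* count of copies to enqueue: 0 = drop, 1 = deliver,
         * 2 = deliver plus a duplicate
         */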

        pr_debug("netem_enqueue skb=%p\n", skb);

        /* Random duplication */
        if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
                ++count;

        /* Random packet drop 0 => none, ~0 => all */
        if (q->loss && q->loss >= get_crandom(&q->loss_cor))
                --count;

        if (count == 0) {
                sch->qstats.drops++;
                kfree_skb(skb);
                return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        }

        skb_orphan(skb);

        /*
         * If we need to duplicate the packet, re-insert the copy at the
         * top of the qdisc tree, since the parent queue discipline expects
         * that only one skb will be queued per enqueue call.
         */
        if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
                struct Qdisc *rootq = qdisc_root(sch);
                u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
                q->duplicate = 0;

                qdisc_enqueue_root(skb2, rootq);
                q->duplicate = dupsave;
        }

        /*
         * Randomized packet corruption.
         * Make a copy if needed, since we are modifying the data.
         * If the packet is going to be hardware checksummed, then
         * do the checksum now in software, before we mangle it.
         */
        if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
                if (!(skb = skb_unshare(skb, GFP_ATOMIC))
                    || (skb->ip_summed == CHECKSUM_PARTIAL
                        && skb_checksum_help(skb))) {
                        sch->qstats.drops++;
                        return NET_XMIT_DROP;
                }

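                /* flip one random bit in the linear (header) portion */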
                skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
        }

        cb = netem_skb_cb(skb);
        if (q->gap == 0			/* not doing reordering */
            || q->counter < q->gap	/* inside last reordering gap */
            || q->reorder < get_crandom(&q->reorder_cor)) {
                psched_time_t now;
                psched_tdiff_t delay;

                delay = tabledist(q->latency, q->jitter,
                                  &q->delay_cor, q->delay_dist);

                now = psched_get_time();
                cb->time_to_send = now + delay;
                ++q->counter;
                ret = qdisc_enqueue(skb, q->qdisc);
        } else {
                /*
                 * Do re-ordering by putting one out of N packets at the front
                 * of the queue.
                 */
                cb->time_to_send = psched_get_time();
                q->counter = 0;
                ret = q->qdisc->ops->requeue(skb, q->qdisc);
        }

        if (likely(ret == NET_XMIT_SUCCESS)) {
                sch->q.qlen++;
                sch->bstats.bytes += qdisc_pkt_len(skb);
                sch->bstats.packets++;
        } else if (net_xmit_drop_count(ret)) {
                sch->qstats.drops++;
        }

        pr_debug("netem: enqueue ret %d\n", ret);
        return ret;
}

/* Requeue packets but don't change time stamp */
static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct netem_sched_data *q = qdisc_priv(sch);
        int ret;

        if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
                sch->q.qlen++;
                sch->qstats.requeues++;
        }

        return ret;
}

static unsigned int netem_drop(struct Qdisc *sch)
{
        struct netem_sched_data *q = qdisc_priv(sch);
        unsigned int len = 0;

        if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
                sch->q.qlen--;
                sch->qstats.drops++;
        }
        return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
        struct netem_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;

        smp_mb();
        if (sch->flags & TCQ_F_THROTTLED)
                return NULL;

        skb = q->qdisc->dequeue(q->qdisc);
        if (skb) {
                const struct netem_skb_cb *cb = netem_skb_cb(skb);
                psched_time_t now = psched_get_time();

                /* is it time to send this packet? */
                if (cb->time_to_send <= now) {
                        pr_debug("netem_dequeue: return skb=%p\n", skb);
                        sch->q.qlen--;
                        return skb;
                }

                if (unlikely(q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS)) {
                        qdisc_tree_decrease_qlen(q->qdisc, 1);
                        sch->qstats.drops++;
                        printk(KERN_ERR "netem: %s could not requeue\n",
                               q->qdisc->ops->id);
                }

                qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
        }

        return NULL;
}
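
/* The dequeue-then-requeue sequence above stands in for a "peek"
 * operation, which qdiscs of this vintage do not provide: the child is
 * popped to inspect the timestamp, pushed back if the packet is not yet
 * due, and the watchdog is armed to retry at time_to_send.
 */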

static void netem_reset(struct Qdisc *sch)
{
        struct netem_sched_data *q = qdisc_priv(sch);

        qdisc_reset(q->qdisc);
        sch->q.qlen = 0;
        qdisc_watchdog_cancel(&q->watchdog);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
        struct netem_sched_data *q = qdisc_priv(sch);
        unsigned long n = nla_len(attr)/sizeof(__s16);
        const __s16 *data = nla_data(attr);
        spinlock_t *root_lock;
        struct disttable *d;
        int i;

        if (n > 65536)
                return -EINVAL;

        d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
        if (!d)
                return -ENOMEM;

        d->size = n;
        for (i = 0; i < n; i++)
                d->table[i] = data[i];

        root_lock = qdisc_root_sleeping_lock(sch);

        spin_lock_bh(root_lock);
        d = xchg(&q->delay_dist, d);
        spin_unlock_bh(root_lock);

        kfree(d);
        return 0;
}
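
/* The table entries are unit-less samples of the desired delay
 * distribution, scaled by NETEM_DIST_SCALE, and are consumed by
 * tabledist() above. Userspace tc generates them with its maketable
 * tool and ships ready-made tables (e.g. normal.dist, pareto.dist);
 * the install path varies by distribution.
 */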

static int get_correlation(struct Qdisc *sch, const struct nlattr *attr)
{
        struct netem_sched_data *q = qdisc_priv(sch);
        const struct tc_netem_corr *c = nla_data(attr);

        init_crandom(&q->delay_cor, c->delay_corr);
        init_crandom(&q->loss_cor, c->loss_corr);
        init_crandom(&q->dup_cor, c->dup_corr);
        return 0;
}

static int get_reorder(struct Qdisc *sch, const struct nlattr *attr)
{
        struct netem_sched_data *q = qdisc_priv(sch);
        const struct tc_netem_reorder *r = nla_data(attr);

        q->reorder = r->probability;
        init_crandom(&q->reorder_cor, r->correlation);
        return 0;
}

static int get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
{
        struct netem_sched_data *q = qdisc_priv(sch);
        const struct tc_netem_corrupt *r = nla_data(attr);

        q->corrupt = r->probability;
        init_crandom(&q->corrupt_cor, r->correlation);
        return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
        [TCA_NETEM_CORR]    = { .len = sizeof(struct tc_netem_corr) },
        [TCA_NETEM_REORDER] = { .len = sizeof(struct tc_netem_reorder) },
        [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) },
};
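
/* The TCA_OPTIONS payload uses the "compat" layout that
 * nla_parse_nested_compat() expects: a struct tc_netem_qopt header
 * followed immediately by the optional nested attributes validated by
 * netem_policy above.
 */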

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
        struct netem_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_NETEM_MAX + 1];
        struct tc_netem_qopt *qopt;
        int ret;

        if (opt == NULL)
                return -EINVAL;

        ret = nla_parse_nested_compat(tb, TCA_NETEM_MAX, opt, netem_policy,
                                      qopt, sizeof(*qopt));
        if (ret < 0)
                return ret;

        ret = fifo_set_limit(q->qdisc, qopt->limit);
        if (ret) {
                pr_debug("netem: can't set fifo limit\n");
                return ret;
        }

        q->latency = qopt->latency;
        q->jitter = qopt->jitter;
        q->limit = qopt->limit;
        q->gap = qopt->gap;
        q->counter = 0;
        q->loss = qopt->loss;
        q->duplicate = qopt->duplicate;

        /* For compatibility with earlier versions:
         * if gap is set, assume 100% reorder probability.
         */
        if (q->gap)
                q->reorder = ~0;

        if (tb[TCA_NETEM_CORR]) {
                ret = get_correlation(sch, tb[TCA_NETEM_CORR]);
                if (ret)
                        return ret;
        }

        if (tb[TCA_NETEM_DELAY_DIST]) {
                ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
                if (ret)
                        return ret;
        }

        if (tb[TCA_NETEM_REORDER]) {
                ret = get_reorder(sch, tb[TCA_NETEM_REORDER]);
                if (ret)
                        return ret;
        }

        if (tb[TCA_NETEM_CORRUPT]) {
                ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);
                if (ret)
                        return ret;
        }

        return 0;
}

/*
 * Special case version of FIFO queue for use by netem.
 * It queues packets in order of the timestamps in their skbs.
 */
struct fifo_sched_data {
        u32 limit;
        psched_time_t oldest;
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
        struct fifo_sched_data *q = qdisc_priv(sch);
        struct sk_buff_head *list = &sch->q;
        psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
        struct sk_buff *skb;

        if (likely(skb_queue_len(list) < q->limit)) {
                /* Optimize for add at tail */
                if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
                        q->oldest = tnext;
                        return qdisc_enqueue_tail(nskb, sch);
                }

                skb_queue_reverse_walk(list, skb) {
                        const struct netem_skb_cb *cb = netem_skb_cb(skb);

                        if (tnext >= cb->time_to_send)
                                break;
                }

                __skb_queue_after(list, skb, nskb);

                sch->qstats.backlog += qdisc_pkt_len(nskb);
                sch->bstats.bytes += qdisc_pkt_len(nskb);
                sch->bstats.packets++;

                return NET_XMIT_SUCCESS;
        }

        return qdisc_reshape_fail(nskb, sch);
}
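
/* The reverse walk keeps the queue sorted by time_to_send and is O(n)
 * in the worst case, but a newly enqueued packet usually carries the
 * latest timestamp, so the common case is the O(1) tail append above.
 */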

static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct fifo_sched_data *q = qdisc_priv(sch);

        if (opt) {
                struct tc_fifo_qopt *ctl = nla_data(opt);
                if (nla_len(opt) < sizeof(*ctl))
                        return -EINVAL;

                q->limit = ctl->limit;
        } else
                q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);

        q->oldest = PSCHED_PASTPERFECT;
        return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct fifo_sched_data *q = qdisc_priv(sch);
        struct tc_fifo_qopt opt = { .limit = q->limit };

        NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
        return skb->len;

nla_put_failure:
        return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
        .id        = "tfifo",
        .priv_size = sizeof(struct fifo_sched_data),
        .enqueue   = tfifo_enqueue,
        .dequeue   = qdisc_dequeue_head,
        .requeue   = qdisc_requeue,
        .drop      = qdisc_queue_drop,
        .init      = tfifo_init,
        .reset     = qdisc_reset_queue,
        .change    = tfifo_init,
        .dump      = tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct netem_sched_data *q = qdisc_priv(sch);
        int ret;

        if (!opt)
                return -EINVAL;

        qdisc_watchdog_init(&q->watchdog, sch);

        q->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                     &tfifo_qdisc_ops,
                                     TC_H_MAKE(sch->handle, 1));
        if (!q->qdisc) {
                pr_debug("netem: qdisc create failed\n");
                return -ENOMEM;
        }

        ret = netem_change(sch, opt);
        if (ret) {
                pr_debug("netem: change failed\n");
                qdisc_destroy(q->qdisc);
        }
        return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
        struct netem_sched_data *q = qdisc_priv(sch);

        qdisc_watchdog_cancel(&q->watchdog);
        qdisc_destroy(q->qdisc);
        kfree(q->delay_dist);
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        const struct netem_sched_data *q = qdisc_priv(sch);
        unsigned char *b = skb_tail_pointer(skb);
        struct nlattr *nla = (struct nlattr *) b;
        struct tc_netem_qopt qopt;
        struct tc_netem_corr cor;
        struct tc_netem_reorder reorder;
        struct tc_netem_corrupt corrupt;

        qopt.latency = q->latency;
        qopt.jitter = q->jitter;
        qopt.limit = q->limit;
        qopt.loss = q->loss;
        qopt.gap = q->gap;
        qopt.duplicate = q->duplicate;
        NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

        cor.delay_corr = q->delay_cor.rho;
        cor.loss_corr = q->loss_cor.rho;
        cor.dup_corr = q->dup_cor.rho;
        NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

        reorder.probability = q->reorder;
        reorder.correlation = q->reorder_cor.rho;
        NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

        corrupt.probability = q->corrupt;
        corrupt.correlation = q->corrupt_cor.rho;
        NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

        nla->nla_len = skb_tail_pointer(skb) - b;

        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
                            struct sk_buff *skb, struct tcmsg *tcm)
{
        struct netem_sched_data *q = qdisc_priv(sch);

        if (cl != 1)    /* only one class */
                return -ENOENT;

        tcm->tcm_handle |= TC_H_MIN(1);
        tcm->tcm_info = q->qdisc->handle;

        return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                       struct Qdisc **old)
{
        struct netem_sched_data *q = qdisc_priv(sch);

        if (new == NULL)
                new = &noop_qdisc;

        sch_tree_lock(sch);
        *old = xchg(&q->qdisc, new);
        qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
        qdisc_reset(*old);
        sch_tree_unlock(sch);

        return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct netem_sched_data *q = qdisc_priv(sch);
        return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
        return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                              struct nlattr **tca, unsigned long *arg)
{
        return -ENOSYS;
}

static int netem_delete(struct Qdisc *sch, unsigned long arg)
{
        return -ENOSYS;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
        if (!walker->stop) {
                if (walker->count >= walker->skip)
                        if (walker->fn(sch, 1, walker) < 0) {
                                walker->stop = 1;
                                return;
                        }
                walker->count++;
        }
}

static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
{
        return NULL;
}

static const struct Qdisc_class_ops netem_class_ops = {
        .graft     = netem_graft,
        .leaf      = netem_leaf,
        .get       = netem_get,
        .put       = netem_put,
        .change    = netem_change_class,
        .delete    = netem_delete,
        .walk      = netem_walk,
        .tcf_chain = netem_find_tcf,
        .dump      = netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
        .id        = "netem",
        .cl_ops    = &netem_class_ops,
        .priv_size = sizeof(struct netem_sched_data),
        .enqueue   = netem_enqueue,
        .dequeue   = netem_dequeue,
        .requeue   = netem_requeue,
        .drop      = netem_drop,
        .init      = netem_init,
        .reset     = netem_reset,
        .destroy   = netem_destroy,
        .change    = netem_change,
        .dump      = netem_dump,
        .owner     = THIS_MODULE,
};


static int __init netem_module_init(void)
{
        pr_info("netem: version " VERSION "\n");
        return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
        unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");