Revert "pkt_sched: sch_sfq: dump a real number of flows"
[deliverable/linux.git] / net / sched / sch_netem.c
/*
 * net/sched/sch_netem.c	Network emulator
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License.
 *
 * Many of the algorithms and ideas for this came from
 * NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.2"

/* Network Emulation Queuing algorithm.
   ====================================

   Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		Network Emulation Tool"
	    [2] Luigi Rizzo, DummyNet for FreeBSD

   ----------------------------------------------------------------

   This started out as a simple way to delay outgoing packets to
   test TCP but has grown to include most of the functionality
   of a full blown network emulator like NISTnet. It can delay
   packets and add random jitter (and correlation). The random
   distribution can also be loaded from a table, to provide
   normal, Pareto, or experimental curves. Packet loss,
   duplication, and reordering can also be emulated.

   This qdisc does not do classification; that can be handled by
   layering other disciplines. It does not need to do bandwidth
   control either, since that can be handled by using token
   bucket or other rate control.

   The emulator is limited by the Linux timer resolution
   and will create packet bursts on the HZ boundary (1ms).
*/
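
/*
 * Typical invocation from userspace, shown here only as an illustrative
 * sketch of the iproute2 "tc" frontend (the commands are not part of this
 * file; the qdisc itself only sees the resulting netlink attributes):
 *
 *	# 100ms delay with 10ms jitter, 25% correlation between samples:
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *
 *	# 0.1% packet loss plus 1% duplication:
 *	tc qdisc change dev eth0 root netem loss 0.1% duplicate 1%
 */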

struct netem_sched_data {
	struct Qdisc	*qdisc;
	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32 size;
		s16 table[0];
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(skb->cb) <
		sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}
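
/*
 * Rough layout sketch of skb->cb while a packet sits inside netem: the
 * generic struct qdisc_skb_cb occupies the front of the fixed-size cb[]
 * array, and netem's private time stamp lives in its trailing data area:
 *
 *	skb->cb: [ struct qdisc_skb_cb | struct netem_skb_cb | unused... ]
 *
 * The BUILD_BUG_ON above proves at compile time that both fit, so
 * netem_skb_cb() can simply return a pointer into the private area.
 */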

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
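
/*
 * A minimal sketch of the recurrence get_crandom() implements, written in
 * real arithmetic with p = rho / 2^32 (so rho near ~0U means p near 1.0):
 *
 *	answer = (1 - p) * U + p * last,	U uniform in [0, 2^32)
 *
 * Both products are computed in 64 bits and shifted back down by 32, so
 * no floating point is needed and the result stays a 32-bit value.
 */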

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
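
/*
 * The return expression above computes, in effect,
 *
 *	mu + sigma * t / NETEM_DIST_SCALE
 *
 * without risking overflow in sigma * t: sigma is split into quotient and
 * remainder by NETEM_DIST_SCALE, so only (sigma % NETEM_DIST_SCALE) * t
 * needs the division, and the +/- NETEM_DIST_SCALE/2 adjustment makes
 * that division round to nearest instead of truncating toward zero.
 */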

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the payload;
	 * if the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
		    || (skb->ip_summed == CHECKSUM_PARTIAL
			&& skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = netem_skb_cb(skb);
	if (q->gap == 0			/* not doing reordering */
	    || q->counter < q->gap	/* inside last reordering gap */
	    || q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();
		cb->time_to_send = now + delay;
		++q->counter;
		ret = qdisc_enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;
		ret = q->qdisc->ops->requeue(skb, q->qdisc);
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += qdisc_pkt_len(skb);
		sch->bstats.packets++;
	} else
		sch->qstats.drops++;

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}
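
/*
 * Decision sketch for the count logic in netem_enqueue() above:
 *
 *	count starts at 1 (deliver once)
 *	duplication hit	-> count = 2 (a clone re-enters the root qdisc)
 *	loss hit	-> count decremented; if it reaches 0 the skb is
 *			   dropped before any clone is made
 *
 * A packet that wins both the duplication and the loss lottery ends up
 * with count == 1 and is therefore delivered exactly once, un-cloned.
 */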

/* Requeue packets but don't change time stamp */
static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	return ret;
}

static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	smp_mb();
	if (sch->flags & TCQ_F_THROTTLED)
		return NULL;

	skb = q->qdisc->dequeue(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb = netem_skb_cb(skb);
		psched_time_t now = psched_get_time();

		/* has this packet's send time arrived yet? */
		if (cb->time_to_send <= now) {
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			return skb;
		}

		if (unlikely(q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS)) {
			qdisc_tree_decrease_qlen(q->qdisc, 1);
			sch->qstats.drops++;
			printk(KERN_ERR "netem: %s could not requeue\n",
			       q->qdisc->ops->id);
		}

		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	return NULL;
}
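
/*
 * Dequeue pattern, roughly: the inner qdisc (tfifo) keeps packets sorted
 * by time_to_send, so "peeking" is done by dequeueing the head and
 * requeueing it if its deadline has not arrived. qdisc_watchdog_schedule()
 * then marks the qdisc throttled and arms an hrtimer; when the earliest
 * deadline expires the watchdog clears the flag and reschedules the
 * queue, which is why netem_dequeue() bails out early while throttled.
 */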

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > 65536)
		return -EINVAL;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_lock(sch);

	spin_lock_bh(root_lock);
	d = xchg(&q->delay_dist, d);
	spin_unlock_bh(root_lock);

	kfree(d);
	return 0;
}
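
/*
 * The table contents are opaque to the kernel: userspace ships samples of
 * the desired distribution's inverse CDF, scaled by NETEM_DIST_SCALE
 * (e.g. the normal.dist and pareto.dist files installed by iproute2 under
 * /usr/lib/tc, generated offline from measured or synthetic data), so
 * tabledist() only needs one uniform random index into the table.
 */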

static int get_correlation(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
	return 0;
}

static int get_reorder(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
	return 0;
}

static int get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
};

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	ret = nla_parse_nested_compat(tb, TCA_NETEM_MAX, opt, netem_policy,
				      qopt, sizeof(*qopt));
	if (ret < 0)
		return ret;

	ret = fifo_set_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR]) {
		ret = get_correlation(sch, tb[TCA_NETEM_CORR]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_REORDER]) {
		ret = get_reorder(sch, tb[TCA_NETEM_REORDER]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_CORRUPT]) {
		ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);
		if (ret)
			return ret;
	}

	return 0;
}
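
/*
 * Wire-format sketch of the options netem_change() parses. This is the
 * nested "compat" layout expected by nla_parse_nested_compat(): a fixed
 * struct first, then optional nested attributes.
 *
 *	TCA_OPTIONS
 *	  struct tc_netem_qopt { latency, limit, loss, gap,
 *				 duplicate, jitter }
 *	  [TCA_NETEM_CORR]	 struct tc_netem_corr
 *	  [TCA_NETEM_DELAY_DIST] array of __s16
 *	  [TCA_NETEM_REORDER]	 struct tc_netem_reorder
 *	  [TCA_NETEM_CORRUPT]	 struct tc_netem_corrupt
 */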

/*
 * Special case version of FIFO queue for use by netem.
 * It queues packets in order based on the time stamp in each skb's cb.
 */
struct fifo_sched_data {
	u32 limit;
	psched_time_t oldest;
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		/* Optimize for add at tail */
		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
			q->oldest = tnext;
			return qdisc_enqueue_tail(nskb, sch);
		}

		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb = netem_skb_cb(skb);

			if (tnext >= cb->time_to_send)
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += qdisc_pkt_len(nskb);
		sch->bstats.bytes += qdisc_pkt_len(nskb);
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}
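
/*
 * Insertion sketch: most packets get a deadline of now + (latency +/-
 * jitter), and jitter is usually small relative to latency, so a new
 * packet almost always belongs at the tail; hence the fast path above.
 * The rare out-of-order case walks the queue in reverse (newest first),
 * keeping enqueue effectively O(1) for the common configuration.
 */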

static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = nla_data(opt);
		if (nla_len(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);

	q->oldest = PSCHED_PASTPERFECT;
	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.requeue	=	qdisc_requeue,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
				     &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nla = (struct nlattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	nla->nla_len = skb_tail_pointer(skb) - b;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			      struct nlattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int netem_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.change		=	netem_change_class,
	.delete		=	netem_delete,
	.walk		=	netem_walk,
	.tcf_chain	=	netem_find_tcf,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.requeue	=	netem_requeue,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");
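
/*
 * Loading is normally automatic: a "tc qdisc add ... netem ..." request
 * for an unknown qdisc triggers a request_module("sch_netem") via the id
 * string registered above. Manual "modprobe sch_netem" works as well.
 */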