/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/pkt_sched.h>

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full-blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can also be loaded from a table to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines. It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.

	 The simulator is limited by the Linux timer resolution
	 and will create packet bursts on the HZ boundary (1ms).
*/
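
/* Example: a typical userspace configuration for this qdisc (device name and
 * numbers illustrative):
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 0.1%
 *
 * i.e. a 100ms mean delay with 10ms of jitter, 25% correlated with the
 * previous delay, plus 0.1% random packet loss.
 */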

struct netem_sched_data {
	struct Qdisc	*qdisc;
	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency, jitter;
	u32 loss, limit, counter, gap;
	u32 duplicate, reorder, corrupt;

	/* Correlated random number generator state */
	struct crndstate {
		unsigned long last;
		unsigned long rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	/* Optional delay distribution table */
	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static unsigned long get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
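	/* Blend fresh randomness with the previous value in 32-bit fixed
	 * point: answer = ((2^32 - rho) * value + rho * last) / 2^32,
	 * so a larger rho means stronger correlation with the last output.
	 */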
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static long tabledist(unsigned long mu, long sigma,
		      struct crndstate *state, const struct disttable *dist)
{
	long t, x;
	unsigned long rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
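	/* The table entry t is a signed sample scaled by NETEM_DIST_SCALE.
	 * Multiply it by sigma in two parts (remainder and quotient) so the
	 * product cannot overflow, rounding the fractional part to nearest.
	 */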
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_BYPASS;
	}

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = sch->dev->qdisc;
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the packet.
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
		    || (skb->ip_summed == CHECKSUM_PARTIAL
			&& skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}
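
		/* Corrupt the packet by flipping one random bit in a random
		 * byte of its linear data.
		 */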
		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = (struct netem_skb_cb *)skb->cb;
	if (q->gap == 0			/* not doing reordering */
	    || q->counter < q->gap	/* inside last reordering gap */
	    || q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		PSCHED_GET_TIME(now);
		PSCHED_TADD2(now, delay, cb->time_to_send);
		++q->counter;
		ret = q->qdisc->enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		PSCHED_GET_TIME(cb->time_to_send);
		q->counter = 0;
		ret = q->qdisc->ops->requeue(skb, q->qdisc);
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
	} else
		sch->qstats.drops++;

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}

/* Requeue packets but don't change time stamp */
static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	return ret;
}

static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	smp_mb();
	if (sch->flags & TCQ_F_THROTTLED)
		return NULL;

	skb = q->qdisc->dequeue(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb
			= (const struct netem_skb_cb *)skb->cb;
		psched_time_t now;

		/* has the packet's scheduled send time arrived? */
		PSCHED_GET_TIME(now);

		if (PSCHED_TLESS(cb->time_to_send, now)) {
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			sch->flags &= ~TCQ_F_THROTTLED;
			return skb;
		}
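
		/* Not yet due: arm the watchdog to fire at the scheduled send
		 * time, then put the packet back at the head of the child
		 * qdisc so it is seen again on the next dequeue.
		 */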
		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);

		if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
			qdisc_tree_decrease_qlen(q->qdisc, 1);
			sch->qstats.drops++;
			printk(KERN_ERR "netem: queue discipline %s could not requeue\n",
			       q->qdisc->ops->id);
		}
	}

	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}

/* Pass size change message down to embedded FIFO */
static int set_fifo_limit(struct Qdisc *q, int limit)
{
	struct rtattr *rta;
	int ret = -ENOMEM;

	/* Hack to avoid sending change message to non-FIFO */
	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
		return 0;

	rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
	if (rta) {
		rta->rta_type = RTM_NEWQDISC;
		rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
		((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

		ret = q->ops->change(q, rta);
		kfree(rta);
	}
	return ret;
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = RTA_PAYLOAD(attr)/sizeof(__s16);
	const __s16 *data = RTA_DATA(attr);
	struct disttable *d;
	int i;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];
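
	/* Publish the new table under the device queue lock so readers in
	 * the packet path never see a half-built table; the previous table
	 * (possibly NULL) is returned by xchg() and freed below.
	 */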
	spin_lock_bh(&sch->dev->queue_lock);
	d = xchg(&q->delay_dist, d);
	spin_unlock_bh(&sch->dev->queue_lock);

	kfree(d);
	return 0;
}

static int get_correlation(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*c))
		return -EINVAL;

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
	return 0;
}

static int get_reorder(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
	return 0;
}

static int get_corrupt(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = RTA_DATA(opt);
	ret = set_fifo_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* For compatibility with earlier versions:
	 * if gap is set, assume 100% reordering probability.
	 */
	if (q->gap)
		q->reorder = ~0;

	/* Handle nested options after initial queue options.
	 * Should have put all options in nested format, but it is too late now.
	 */
	if (RTA_PAYLOAD(opt) > sizeof(*qopt)) {
		struct rtattr *tb[TCA_NETEM_MAX];

		if (rtattr_parse(tb, TCA_NETEM_MAX,
				 RTA_DATA(opt) + sizeof(*qopt),
				 RTA_PAYLOAD(opt) - sizeof(*qopt)))
			return -EINVAL;

		if (tb[TCA_NETEM_CORR-1]) {
			ret = get_correlation(sch, tb[TCA_NETEM_CORR-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_DELAY_DIST-1]) {
			ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_REORDER-1]) {
			ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_CORRUPT-1]) {
			ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT-1]);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/*
 * Special case version of FIFO queue for use by netem.
 * It queues in order based on the timestamps in the skbs' control blocks.
 */
struct fifo_sched_data {
	u32 limit;
};
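
/* tfifo_enqueue() below walks the queue from the tail and inserts the new
 * packet behind the last already-queued packet whose send time is not later,
 * so packets whose send times arrive in order (the common case) are appended
 * in O(1).
 */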
static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	const struct netem_skb_cb *ncb
		= (const struct netem_skb_cb *)nskb->cb;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb
				= (const struct netem_skb_cb *)skb->cb;

			if (!PSCHED_TLESS(ncb->time_to_send, cb->time_to_send))
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += nskb->len;
		sch->bstats.bytes += nskb->len;
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_drop(nskb, sch);
}

static int tfifo_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = RTA_DATA(opt);

		if (RTA_PAYLOAD(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, sch->dev->tx_queue_len, 1);

	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.requeue	=	qdisc_requeue,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb->tail;
	struct rtattr *rta = (struct rtattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	rta->rta_len = skb->tail - b;

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			      struct rtattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int netem_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.change		=	netem_change_class,
	.delete		=	netem_delete,
	.walk		=	netem_walk,
	.tcf_chain	=	netem_find_tcf,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.requeue	=	netem_requeue,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};

static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");