/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/* Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can also be loaded from a table to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines. It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.

	 Correlated Loss Generator models

	 Added generation of correlated loss according to the 4-state
	 Markov chain of the GI model and to the Gilbert-Elliot model.

	 References:
	 [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	 [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	 and intuitive loss model for packet networks and its implementation
	 in the Netem module in the Linux kernel", available in [1]

	 Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		  Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
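
/* Example usage, for illustration only; see the tc-netem(8) man page for
 * the authoritative syntax:
 *
 *	# 100ms delay, 10ms jitter, each delay 25% correlated with the last
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *
 *	# add independent 0.3% loss and 1% duplication on top
 *	tc qdisc change dev eth0 root netem delay 100ms 10ms 25% \
 *		loss 0.3% duplicate 1%
 */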

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u32 rate;
	s32 packet_overhead;
	u32 cell_size;
	u32 cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32 size;
		s16 table[0];
	} *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 */
struct netem_skb_cb {
	psched_time_t	time_to_send;
	ktime_t		tstamp_save;
};

/* Because space in skb->cb[] is tight, netem overloads skb->next/prev/tstamp
 * to hold a rb_node structure.
 *
 * If struct sk_buff layout is changed, the following checks will complain.
 */
static struct rb_node *netem_rb_node(struct sk_buff *skb)
{
	BUILD_BUG_ON(offsetof(struct sk_buff, next) != 0);
	BUILD_BUG_ON(offsetof(struct sk_buff, prev) !=
		     offsetof(struct sk_buff, next) + sizeof(skb->next));
	BUILD_BUG_ON(offsetof(struct sk_buff, tstamp) !=
		     offsetof(struct sk_buff, prev) + sizeof(skb->prev));
	BUILD_BUG_ON(sizeof(struct rb_node) > sizeof(skb->next) +
					      sizeof(skb->prev) +
					      sizeof(skb->tstamp));
	return (struct rb_node *)&skb->next;
}

static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
{
	return (struct sk_buff *)rb;
}

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
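
/* A worked example of the fixed-point blend above, for illustration:
 * with rho = 0x80000000 (roughly 0.5 on the 32-bit scale), the result
 * reduces to about (value + last) / 2, i.e. each draw is pulled halfway
 * toward the previous one; rho == 0 degenerates to plain net_random().
 */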

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = net_random();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   1 => successfully transmitted packets within a gap period
	 *   4 => isolated losses within a gap period
	 *   3 => lost packets within a burst period
	 *   2 => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case 1:
		if (rnd < clg->a4) {
			clg->state = 4;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			/* a4 and a1 partition the loss region, so the
			 * upper bound is their sum, not a1 alone
			 */
			clg->state = 3;
			return true;
		} else if (clg->a1 + clg->a4 < rnd)
			clg->state = 1;

		break;
	case 2:
		if (rnd < clg->a5) {
			clg->state = 3;
			return true;
		} else
			clg->state = 2;

		break;
	case 3:
		if (rnd < clg->a3)
			clg->state = 2;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = 1;
			return true;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = 3;
			return true;
		}
		break;
	case 4:
		clg->state = 1;
		break;
	}

	return false;
}

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case 1:
		if (net_random() < clg->a1)
			clg->state = 2;
		if (net_random() < clg->a4)
			return true;
		break;	/* must not fall through into the bad-state case */
	case 2:
		if (net_random() < clg->a2)
			clg->state = 1;
		/* a3 holds h, the transmission probability in the bad
		 * state, so the packet is lost with probability 1-h
		 */
		if (net_random() > clg->a3)
			return true;
	}

	return false;
}

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4-state loss model algorithm (used also for the GI model):
		 * a true return from the Markov 4-state loss generator
		 * means this packet is dropped
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm:
		 * a true return from the Gilbert-Elliot loss generator
		 * means this packet is dropped
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}
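
/* The three models above are selected from user space; illustrative
 * tc-netem(8) syntax (percentages are scaled onto the u32 range):
 *
 *	# plain (optionally correlated) random loss -> CLG_RANDOM
 *	tc qdisc add dev eth0 root netem loss 1% 25%
 *
 *	# Gilbert-Elliot, parameters p, r, 1-h, 1-k -> CLG_GILB_ELL
 *	tc qdisc add dev eth0 root netem loss gemodel 1% 10% 70% 0.1%
 */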


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
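
/* Worked example, for illustration: sigma is split by NETEM_DIST_SCALE
 * into quotient and remainder so the multiplication by t cannot overflow
 * and rounding stays symmetric.  With mu = 100ms, sigma = 10ms and a
 * table entry t = NETEM_DIST_SCALE (one standard deviation), the two
 * halves recombine to exactly mu + sigma = 110ms.
 */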

static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
{
	u64 ticks;

	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	ticks = (u64)len * NSEC_PER_SEC;

	do_div(ticks, q->rate);
	return PSCHED_NS2TICKS(ticks);
}
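
/* Worked example, for illustration: q->rate is in bytes per second, so
 * a 1500 byte packet at rate = 125000 (1 Mbit/s) occupies the link for
 * 1500 * NSEC_PER_SEC / 125000 = 12ms, returned as scheduler ticks.
 */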

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = netem_rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(netem_rb_node(nskb), parent, p);
	rb_insert_color(netem_rb_node(nskb), &q->t_root);
	sch->q.qlen++;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int count = 1;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			sch->qstats.drops++; /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying it.
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
		    (skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb_checksum_help(skb)))
			return qdisc_drop(skb, sch);

		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
		return qdisc_reshape_fail(skb, sch);

	sch->qstats.backlog += qdisc_pkt_len(skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();

		if (q->rate) {
			struct sk_buff *last;

			if (!skb_queue_empty(&sch->q))
				last = skb_peek_tail(&sch->q);
			else
				last = netem_rb_to_skb(rb_last(&q->t_root));
			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= netem_skb_cb(last)->time_to_send - now;
				delay = max_t(psched_tdiff_t, 0, delay);
				now = netem_skb_cb(last)->time_to_send;
			}

			delay += packet_len_2_sched_time(skb->len, q);
		}

		cb->time_to_send = now + delay;
		cb->tstamp_save = skb->tstamp;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

	return NET_XMIT_SUCCESS;
}

static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len;

	len = qdisc_queue_drop(sch);

	if (!len) {
		struct rb_node *p = rb_first(&q->t_root);

		if (p) {
			struct sk_buff *skb = netem_rb_to_skb(p);

			rb_erase(p, &q->t_root);
			sch->q.qlen--;
			skb->next = NULL;
			skb->prev = NULL;
			len = qdisc_pkt_len(skb);
			kfree_skb(skb);
		}
	}
	if (!len && q->qdisc && q->qdisc->ops->drop)
		len = q->qdisc->ops->drop(q->qdisc);
	if (len)
		sch->qstats.drops++;

	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

	if (qdisc_is_throttled(sch))
		return NULL;

tfifo_dequeue:
	skb = __skb_dequeue(&sch->q);
	if (skb) {
deliver:
		sch->qstats.backlog -= qdisc_pkt_len(skb);
		qdisc_unthrottled(sch);
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		psched_time_t time_to_send;

		skb = netem_rb_to_skb(p);

		/* is more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (time_to_send <= psched_get_time()) {
			rb_erase(p, &q->t_root);

			sch->q.qlen--;
			skb->next = NULL;
			skb->prev = NULL;
			skb->tstamp = netem_skb_cb(skb)->tstamp_save;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif

			if (q->qdisc) {
				int err = qdisc_enqueue(skb, q->qdisc);

				if (unlikely(err != NET_XMIT_SUCCESS)) {
					if (net_xmit_drop_count(err)) {
						sch->qstats.drops++;
						qdisc_tree_decrease_qlen(sch, 1);
					}
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}
		qdisc_watchdog_schedule(&q->watchdog, time_to_send);
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	if (d) {
		if (is_vmalloc_addr(d))
			vfree(d);
		else
			kfree(d);
	}
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;
	size_t s;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	s = sizeof(struct disttable) + n * sizeof(s16);
	d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
	if (!d)
		d = vmalloc(s);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}

static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	q->cell_overhead = r->cell_overhead;
}

static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = 1;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = 1;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
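
/* netem is unusual in that TCA_OPTIONS carries a fixed struct
 * tc_netem_qopt first and only then the nested attributes, which is why
 * parse_attr() skips NLA_ALIGN(len) bytes before calling nla_parse().
 * Sketch of the layout:
 *
 *	TCA_OPTIONS
 *	  struct tc_netem_qopt	(latency, limit, loss, gap, ...)
 *	  TCA_NETEM_CORR	(optional)
 *	  TCA_NETEM_DELAY_DIST	(optional)
 *	  ...
 */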

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	sch->limit = qopt->limit;

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(sch, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_REORDER])
		get_reorder(sch, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(sch, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	q->loss_model = CLG_RANDOM;
	if (tb[TCA_NETEM_LOSS])
		ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);

	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	rate.rate = q->rate;
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	if (*old) {
		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
		qdisc_reset(*old);
	}
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");