/*
 * net/sched/sch_netem.c	Network emulator
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License.
 *
 * Many of the algorithms and ideas for this came from
 * NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/* Network Emulation Queuing algorithm.
   ====================================

   Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
	    Network Emulation Tool"
	    [2] Luigi Rizzo, DummyNet for FreeBSD

   ----------------------------------------------------------------

   This started out as a simple way to delay outgoing packets to
   test TCP but has grown to include most of the functionality
   of a full blown network emulator like NISTnet. It can delay
   packets and add random jitter (and correlation). The random
   distribution can be loaded from a table as well to provide
   normal, Pareto, or experimental curves. Packet loss,
   duplication, and reordering can also be emulated.

   This qdisc does not do classification; that can be handled by
   layering other disciplines. It does not need to do bandwidth
   control either, since that can be handled by using token
   bucket or other rate control.

   Correlated Loss Generator models

   Added generation of correlated loss according to the 4-state
   Markov chain adopted in the GI model and to the 2-state
   "Gilbert-Elliot" model.

   References:
   [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
   [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
   and intuitive loss model for packet networks and its implementation
   in the Netem module in the Linux kernel", available in [1]

   Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
	    Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
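
/* For orientation, typical iproute2 invocations that exercise the features
 * implemented below (illustrative only; see tc-netem(8) for the
 * authoritative syntax):
 *
 *   # 100ms delay, +/-10ms jitter, 25% correlation between samples
 *   tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *
 *   # independent random loss, duplication and corruption
 *   tc qdisc change dev eth0 root netem loss 0.3% duplicate 1% corrupt 0.1%
 *
 *   # reorder 25% of packets (50% correlation); reordering needs a delay
 *   tc qdisc change dev eth0 root netem delay 10ms reorder 25% 50%
 */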

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 */
struct netem_skb_cb {
	psched_time_t	time_to_send;
	ktime_t		tstamp_save;
};

/* Because space in skb->cb[] is tight, netem overloads skb->next/prev/tstamp
 * to hold a rb_node structure.
 *
 * If struct sk_buff layout is changed, the following checks will complain.
 */
static struct rb_node *netem_rb_node(struct sk_buff *skb)
{
	BUILD_BUG_ON(offsetof(struct sk_buff, next) != 0);
	BUILD_BUG_ON(offsetof(struct sk_buff, prev) !=
		     offsetof(struct sk_buff, next) + sizeof(skb->next));
	BUILD_BUG_ON(offsetof(struct sk_buff, tstamp) !=
		     offsetof(struct sk_buff, prev) + sizeof(skb->prev));
	BUILD_BUG_ON(sizeof(struct rb_node) > sizeof(skb->next) +
					      sizeof(skb->prev) +
					      sizeof(skb->tstamp));
	return (struct rb_node *)&skb->next;
}

static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
{
	return (struct sk_buff *)rb;
}

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
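
/* A worked example of the blend above (numbers are illustrative):
 * with rho = 0x80000000 (~50% correlation, since rho is fixed point
 * scaled so that 2^32 means 100%), last = 1000000 and a fresh
 * value = 3000000,
 *
 *   answer = (3000000 * (2^32 - rho) + 1000000 * rho) >> 32
 *          = (3000000 * 2^31 + 1000000 * 2^31) >> 32
 *          = 4000000 / 2 = 2000000
 *
 * i.e. each output is a fixed-point weighted average of the new random
 * draw and the previous output, which is what makes successive values
 * correlated.
 */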

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}
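
/* The a1..a4 parameters are probabilities in 32-bit fixed point, where
 * ~0U means 1.0 (the same convention as get_crandom() above). As an
 * illustrative sketch (not kernel code), user space could encode a
 * Simple Gilbert model with p = 1% and r = 30% as:
 *
 *	a1 = (u32)(0.01 * 0xFFFFFFFF);	p: GOOD -> BAD transition
 *	a2 = (u32)(0.30 * 0xFFFFFFFF);	r: BAD -> GOOD transition
 *	a3 = 0;				h = 0: every packet in BAD is lost
 *	a4 = 0;				1-k = 0: no losses in GOOD
 *
 * which yields loss bursts whose mean length is 1/r, about 3.3 packets.
 */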

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4-state loss model algorithm (also used for the GI model):
		 * ask the 4-state Markov loss generator whether the next
		 * packet should be dropped.
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm:
		 * ask the Gilbert-Elliot loss generator whether the next
		 * packet should be dropped.
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
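
/* The split into (sigma / NETEM_DIST_SCALE) and (sigma % NETEM_DIST_SCALE)
 * just computes sigma * t / NETEM_DIST_SCALE without overflow, rounded to
 * nearest. A worked example, assuming NETEM_DIST_SCALE = 8192: with
 * sigma = 10000 ticks and a table entry t = 4096 (+0.5 std deviations):
 *
 *   x = (10000 % 8192) * 4096 = 1808 * 4096 = 7405568
 *   x += 8192/2                              = 7409664
 *   result = 7409664 / 8192 + (10000 / 8192) * 4096 + mu
 *          = 904 + 4096 + mu = mu + 5000
 *
 * i.e. exactly mu + sigma/2, as expected for t = +0.5 in table units.
 */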

static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
{
	u64 ticks;

	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	ticks = (u64)len * NSEC_PER_SEC;

	do_div(ticks, q->rate);
	return PSCHED_NS2TICKS(ticks);
}
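
/* q->rate is in bytes per second, so for example a 1500 byte packet at
 * rate = 125000 (i.e. 1 Mbit/s) with no overheads occupies the link for
 *
 *   1500 * NSEC_PER_SEC / 125000 = 12,000,000 ns = 12 ms,
 *
 * which is the transmission delay added on top of the configured
 * latency/jitter for this packet.
 */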

static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p;

	while ((p = rb_first(&q->t_root))) {
		struct sk_buff *skb = netem_rb_to_skb(p);

		rb_erase(p, &q->t_root);
		skb->next = NULL;
		skb->prev = NULL;
		kfree_skb(skb);
	}
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = netem_rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(netem_rb_node(nskb), parent, p);
	rb_insert_color(netem_rb_node(nskb), &q->t_root);
	sch->q.qlen++;
}
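
/* Note on ordering: because equal time_to_send keys descend to the right,
 * packets sharing a deadline dequeue in their arrival order, so the rbtree
 * behaves as a time-ordered FIFO (hence "tfifo").
 */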

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int count = 1;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			sch->qstats.drops++; /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are going to modify the payload.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
		    (skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb_checksum_help(skb)))
			return qdisc_drop(skb, sch);

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
		return qdisc_reshape_fail(skb, sch);

	sch->qstats.backlog += qdisc_pkt_len(skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();

		if (q->rate) {
			struct sk_buff *last;

			if (!skb_queue_empty(&sch->q))
				last = skb_peek_tail(&sch->q);
			else
				last = netem_rb_to_skb(rb_last(&q->t_root));
			if (last) {
				/*
				 * The last packet queued is our reference
				 * point (now): the time it still has to wait
				 * counts against this packet's delay, so
				 * subtract that bonus.
				 */
				delay -= netem_skb_cb(last)->time_to_send - now;
				delay = max_t(psched_tdiff_t, 0, delay);
				now = netem_skb_cb(last)->time_to_send;
			}

			delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		cb->tstamp_save = skb->tstamp;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

	return NET_XMIT_SUCCESS;
}
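
/* Reordering example: with "gap 5 reorder 25%", q->counter lets four
 * packets take the delayed tfifo path unconditionally; each packet after
 * that is a candidate to jump the queue with probability ~25% (per the
 * correlated draw above). When one does, it is placed at the head with
 * time_to_send = now, overtaking the delayed packets, and the counter
 * resets.
 */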

static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len;

	len = qdisc_queue_drop(sch);

	if (!len) {
		struct rb_node *p = rb_first(&q->t_root);

		if (p) {
			struct sk_buff *skb = netem_rb_to_skb(p);

			rb_erase(p, &q->t_root);
			sch->q.qlen--;
			skb->next = NULL;
			skb->prev = NULL;
			len = qdisc_pkt_len(skb);
			sch->qstats.backlog -= len;
			kfree_skb(skb);
		}
	}
	if (!len && q->qdisc && q->qdisc->ops->drop)
		len = q->qdisc->ops->drop(q->qdisc);
	if (len)
		sch->qstats.drops++;

	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

	if (qdisc_is_throttled(sch))
		return NULL;

tfifo_dequeue:
	skb = __skb_dequeue(&sch->q);
	if (skb) {
deliver:
		sch->qstats.backlog -= qdisc_pkt_len(skb);
		qdisc_unthrottled(sch);
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		psched_time_t time_to_send;

		skb = netem_rb_to_skb(p);

		/* has the earliest packet's send time been reached? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (time_to_send <= psched_get_time()) {
			rb_erase(p, &q->t_root);

			sch->q.qlen--;
			skb->next = NULL;
			skb->prev = NULL;
			skb->tstamp = netem_skb_cb(skb)->tstamp_save;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif

			if (q->qdisc) {
				int err = qdisc_enqueue(skb, q->qdisc);

				if (unlikely(err != NET_XMIT_SUCCESS)) {
					if (net_xmit_drop_count(err)) {
						sch->qstats.drops++;
						qdisc_tree_decrease_qlen(sch, 1);
					}
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}
		qdisc_watchdog_schedule(&q->watchdog, time_to_send);
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;
	size_t s;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	s = sizeof(struct disttable) + n * sizeof(s16);
	d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
	if (!d)
		d = vmalloc(s);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}
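
/* The tables themselves come from user space: iproute2 ships pre-computed
 * distributions (normal, pareto, paretonormal, typically installed under
 * /usr/lib/tc) produced by its maketable tool. Each entry is, in effect,
 * an inverse-CDF sample scaled so that NETEM_DIST_SCALE corresponds to one
 * standard deviation; tabledist() above turns them back into jitter values.
 */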

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}
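
/* A sketch of the cell semantics: to approximate IP over ATM, user space
 * would configure cell_size = 48 and cell_overhead = 5, so a 1000 byte
 * packet is accounted as ceil(1000/48) = 21 cells of 53 bytes = 1113
 * bytes on the wire. packet_overhead can likewise add (or, being signed,
 * remove) fixed per-packet framing bytes before the cell rounding.
 */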

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
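
/* To illustrate why the helper above is needed: netem's TCA_OPTIONS
 * payload is a legacy struct tc_netem_qopt immediately followed by
 * optional attributes, rather than a pure attribute list:
 *
 *   +--------------------------+-----------------+------------------+---
 *   | struct tc_netem_qopt     | TCA_NETEM_CORR  | TCA_NETEM_LOSS   |...
 *   | (latency, limit, loss,   | (tc_netem_corr) | (nested GI/GE)   |
 *   |  gap, duplicate, jitter) |                 |                  |
 *   +--------------------------+-----------------+------------------+---
 *
 * so parsing must skip NLA_ALIGN(sizeof(*qopt)) bytes before handing the
 * remainder to nla_parse().
 */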

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret) {
			/* recover clg and loss_model, in case
			 * q->clg and q->loss_model were modified
			 * in get_loss_clg()
			 */
			q->clg = old_clg;
			q->loss_model = old_loss_model;
			return ret;
		}
	}

	sch->limit = qopt->limit;

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions:
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64(skb, TCA_NETEM_RATE64, q->rate))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	if (*old) {
		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
		qdisc_reset(*old);
	}
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};

static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");