/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can be loaded from a table as well to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification that can be handled in
	 layering other disciplines.  It does not need to do bandwidth
	 control either since that can be handled by using token
	 bucket or other rate control.

	 Correlated Loss Generator models

	 Added generation of correlated loss according to the
	 "Gilbert-Elliot" model, a 4-state Markov model.

	 References:
	 [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	 [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	 and intuitive loss model for packet networks and its implementation
	 in the Netem module in the Linux kernel", available in [1]

	 Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		  Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
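/* For illustration, a few typical tc(8) invocations that exercise the
 * features described above (iproute2 netem syntax; exact option spelling
 * may vary with the installed tc version):
 *
 *   tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *   tc qdisc change dev eth0 root netem loss 0.3% 25%
 *   tc qdisc change dev eth0 root netem duplicate 1% corrupt 0.1%
 *   tc qdisc change dev eth0 root netem reorder 25% 50% gap 5 delay 10ms
 */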

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;

	enum  {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and
 * skb->tstamp, and skb->next & skb->prev are scratch space for a qdisc,
 * we save skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	psched_time_t	time_to_send;
	ktime_t		tstamp_save;
};


static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
{
	return container_of(rb, struct sk_buff, rbnode);
}

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
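/* A sketch of the arithmetic above: with p = rho / 2^32, the update is
 * effectively answer = value * (1 - p) + last * p, evaluated in 32.32
 * fixed point.  E.g. rho = 0x80000000 (p ~ 0.5) blends the fresh random
 * value and the previous output roughly half and half, which is what
 * makes successive outputs positively correlated.
 */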

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}
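/* Reading the clgstate fields in GE terms (see the struct comment):
 * a1 is the good->bad transition probability p, a2 the bad->good
 * probability r, a3 the transmission probability h while in the bad
 * state (hence the "> a3" comparison above drops with probability
 * 1 - h), and a4 = 1 - k is the loss probability in the good state.
 * All are scaled to the full u32 range.
 */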

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4state loss model algorithm (used also for GI model)
		 * Extracts a value from the markov 4 state loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
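/* Put differently: the table holds samples of a zero-mean distribution
 * scaled by NETEM_DIST_SCALE, and the value returned is essentially
 * mu + sigma * t / NETEM_DIST_SCALE, computed as separate quotient and
 * rounded-remainder parts so the multiplication cannot overflow for
 * large sigma.
 */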

static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
{
	u64 ticks;

	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	ticks = (u64)len * NSEC_PER_SEC;

	do_div(ticks, q->rate);
	return PSCHED_NS2TICKS(ticks);
}
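/* Worked example of the cell accounting above (illustrative numbers):
 * with cell_size = 48 and cell_overhead = 5, a 100 byte packet needs
 * ceil(100 / 48) = 3 cells and is billed as 3 * (48 + 5) = 159 bytes
 * before being converted to transmission time at q->rate bytes/sec.
 */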

static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p;

	while ((p = rb_first(&q->t_root))) {
		struct sk_buff *skb = netem_rb_to_skb(p);

		rb_erase(p, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = netem_rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&nskb->rbnode, parent, p);
	rb_insert_color(&nskb->rbnode, &q->t_root);
	sch->q.qlen++;
}
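/* The rbtree keeps delayed packets sorted by time_to_send.  Note the
 * ">=" in the descent above: a packet whose deadline ties an existing
 * one goes to the right, so packets with equal send times still leave
 * in arrival order, making the tree behave as a stable priority queue.
 */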

/* netem can't properly corrupt a megapacket (like we get from GSO), so
 * when we statistically choose to corrupt one, we instead segment it,
 * returning the first packet to be corrupted, and re-enqueue the
 * remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
	int nb = 0;
	int count = 1;
	int rc = NET_XMIT_SUCCESS;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying it.
	 * If the packet is going to be hardware checksummed, then
	 * do the checksum now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			segs = netem_segment(skb, sch, to_free);
			if (!segs)
				return NET_XMIT_DROP;
		} else {
			segs = skb;
		}

		skb = segs;
		segs = segs->next;

		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
		    (skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb_checksum_help(skb))) {
			rc = qdisc_drop(skb, sch, to_free);
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
		return qdisc_drop(skb, sch, to_free);

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();

		if (q->rate) {
			struct sk_buff *last;

			if (!skb_queue_empty(&sch->q))
				last = skb_peek_tail(&sch->q);
			else
				last = netem_rb_to_skb(rb_last(&q->t_root));
			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= netem_skb_cb(last)->time_to_send - now;
				delay = max_t(psched_tdiff_t, 0, delay);
				now = netem_skb_cb(last)->time_to_send;
			}

			delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		cb->tstamp_save = skb->tstamp;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		while (segs) {
			skb2 = segs->next;
			segs->next = NULL;
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		sch->q.qlen += nb;
		if (nb > 1)
			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	}
	return NET_XMIT_SUCCESS;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

tfifo_dequeue:
	skb = __skb_dequeue(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		psched_time_t time_to_send;

		skb = netem_rb_to_skb(p);

		/* is more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (time_to_send <= psched_get_time()) {
			rb_erase(p, &q->t_root);

			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			skb->tstamp = netem_skb_cb(skb)->tstamp_save;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif

			if (q->qdisc) {
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (unlikely(err != NET_XMIT_SUCCESS)) {
					if (net_xmit_drop_count(err)) {
						qdisc_qstats_drop(sch);
						qdisc_tree_reduce_backlog(sch, 1,
									  qdisc_pkt_len(skb));
					}
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}
		qdisc_watchdog_schedule(&q->watchdog, time_to_send);
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;
	size_t s;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	s = sizeof(struct disttable) + n * sizeof(s16);
	d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
	if (!d)
		d = vmalloc(s);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}
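/* Note on the swap() above: the new table is installed under the qdisc
 * root lock, after which the local variable d points at the *old* table,
 * so it can be freed outside the lock without racing against tabledist()
 * lookups on the datapath.
 */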

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

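/* Why the offset juggling: netem's TCA_OPTIONS payload begins with a
 * struct tc_netem_qopt header and only then carries nested netlink
 * attributes, so parsing has to skip NLA_ALIGN(len) header bytes before
 * handing the remainder to nla_parse().
 */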
/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret) {
			/* recover clg and loss_model, in case
			 * q->clg and q->loss_model were modified
			 * in get_loss_clg()
			 */
			q->clg = old_clg;
			q->loss_model = old_loss_model;
			return ret;
		}
	}

	sch->limit = qopt->limit;

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}
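/* Note the TCA_NETEM_RATE64 handling above: when the configured rate no
 * longer fits in 32 bits, the legacy rate field is saturated to ~0U and
 * the real value is carried in the 64-bit attribute, presumably so that
 * older userspace tools still see a sane (if clipped) number.
 */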

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");