/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/* Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full-blown network emulator like NISTnet. It can delay
	packets and add random jitter (and correlation). The random
	distribution can be loaded from a table as well to provide
	normal, Pareto, or experimental curves. Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines. It does not need to do bandwidth
	control either, since that can be handled by using token
	bucket or other rate control.

	Correlated Loss Generator models

	Added generation of correlated loss according to the
	"Gilbert-Elliot" model, a 4-state Markov model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
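
/* Usage sketch (ours, illustrative only): the qdisc is normally configured
 * from userspace with tc(8).  Typical invocations, assuming an interface
 * eth0:
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *	tc qdisc change dev eth0 root netem loss 0.3% 25%
 *
 * The first adds 100ms of delay with +/-10ms jitter, each sample 25%
 * correlated with the previous one; the second switches to 0.3% correlated
 * random loss.  Every knob below (duplicate, corrupt, reorder, rate, the
 * loss models) maps to a netlink attribute parsed in netem_change().
 */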

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5;	/* p23 used only in 4-states */
	} clg;

};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and
 * skb->tstamp, and skb->next & skb->prev are scratch space for a qdisc,
 * we save skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	psched_time_t	time_to_send;
	ktime_t		tstamp_save;
};

static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
{
	return container_of(rb, struct sk_buff, rbnode);
}

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
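
/* The blend above is fixed-point arithmetic: with rho scaled so that 2^32
 * corresponds to 1.0, each output is approximately
 * rho * last + (1 - rho) * uniform().  A standalone userspace sketch of
 * the same arithmetic (ours; random() stands in for a full 32-bit PRNG):
 */
#if 0
#include <stdint.h>
#include <stdlib.h>

static uint32_t crnd_last;

static uint32_t crnd_next(uint32_t rho)	/* 0 => uncorrelated, ~0U => frozen */
{
	uint64_t value = (uint32_t)random();
	uint64_t r = (uint64_t)rho + 1;
	uint32_t answer = (value * ((1ull << 32) - r) +
			   (uint64_t)crnd_last * r) >> 32;

	crnd_last = answer;
	return answer;
}
#endif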

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}
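
/* In Gilbert-Elliot terms: a1 = p (good->bad transition), a2 = r
 * (bad->good), a4 = 1-k (loss probability while good) and a3 = h, so a
 * packet is lost in the bad state with probability 1-h, hence the '>'
 * comparison.  A standalone userspace sketch of the same chain (ours;
 * random() stands in for a full 32-bit PRNG):
 */
#if 0
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct ge_state {
	bool bad;
	uint32_t p, r, h, k1;	/* scaled so UINT32_MAX ~ probability 1.0 */
};

static bool ge_lose_packet(struct ge_state *s)
{
	if (!s->bad) {
		if ((uint32_t)random() < s->p)
			s->bad = true;
		return (uint32_t)random() < s->k1;
	}
	if ((uint32_t)random() < s->r)
		s->bad = false;
	return (uint32_t)random() > s->h;
}
#endif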

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4-state loss model algorithm (used also for GI model)
		 * Extracts a value from the Markov 4-state loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
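
/* The split multiply above evaluates mu + t * sigma / NETEM_DIST_SCALE
 * without overflowing psched_tdiff_t, rounding the fractional part to
 * nearest.  Worked example (ours, with NETEM_DIST_SCALE == 8192): for
 * sigma = 10000 ticks and a table entry t = 4096 (i.e. +0.5 sigma):
 *
 *	x = (10000 % 8192) * 4096 = 7405568;  x += 4096 (rounding)
 *	x / 8192 + (10000 / 8192) * 4096 = 904 + 4096 = 5000
 *
 * so the sample lands exactly half a deviation above mu.
 */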

static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
{
	u64 ticks;

	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	ticks = (u64)len * NSEC_PER_SEC;

	do_div(ticks, q->rate);
	return PSCHED_NS2TICKS(ticks);
}
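
/* Worked example (ours): with q->rate = 125000 bytes/sec (1 Mbit/s), no
 * packet_overhead and cell_size == 0, a 1500-byte packet costs
 *
 *	ticks = 1500 * NSEC_PER_SEC / 125000 = 12,000,000 ns = 12 ms
 *
 * of serialization delay.  cell_size/cell_overhead model link-layer
 * framing such as ATM cells: the length is rounded up to whole cells
 * before the division.
 */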

static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p;

	while ((p = rb_first(&q->t_root))) {
		struct sk_buff *skb = netem_rb_to_skb(p);

		rb_erase(p, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = netem_rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&nskb->rbnode, parent, p);
	rb_insert_color(&nskb->rbnode, &q->t_root);
	sch->q.qlen++;
}
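
/* Note the '>=' in the descent above: a packet whose time_to_send ties an
 * existing entry goes to its right, so equal timestamps dequeue in arrival
 * order and the rbtree acts as a stable, time-sorted FIFO.
 */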

/* netem can't properly corrupt a megapacket (like we get from GSO), so when
 * we statistically choose to corrupt one, we segment it, returning the first
 * packet to be corrupted, and re-enqueue the remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
	int nb = 0;
	int count = 1;
	int rc = NET_XMIT_SUCCESS;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make copy if needed since we are modifying
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			segs = netem_segment(skb, sch, to_free);
			if (!segs)
				return NET_XMIT_DROP;
		} else {
			segs = skb;
		}

		skb = segs;
		segs = segs->next;

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
		return qdisc_drop(skb, sch, to_free);

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();

		if (q->rate) {
			struct sk_buff *last;

			if (!skb_queue_empty(&sch->q))
				last = skb_peek_tail(&sch->q);
			else
				last = netem_rb_to_skb(rb_last(&q->t_root));
			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= netem_skb_cb(last)->time_to_send - now;
				delay = max_t(psched_tdiff_t, 0, delay);
				now = netem_skb_cb(last)->time_to_send;
			}

			delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		cb->tstamp_save = skb->tstamp;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		while (segs) {
			skb2 = segs->next;
			segs->next = NULL;
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		sch->q.qlen += nb;
		if (nb > 1)
			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	}
	return NET_XMIT_SUCCESS;
}
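
/* Reordering example (ours): 'tc qdisc change dev eth0 root netem gap 5
 * delay 10ms' delays four packets out of every five and lets the fifth
 * take the else-branch above and bypass the tfifo (a gap with no explicit
 * reorder probability implies 100%, see netem_change()), so it overtakes
 * packets still serving their 10ms delay.
 */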

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

tfifo_dequeue:
	skb = __skb_dequeue(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		psched_time_t time_to_send;

		skb = netem_rb_to_skb(p);

		/* is more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (time_to_send <= psched_get_time()) {
			rb_erase(p, &q->t_root);

			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			skb->tstamp = netem_skb_cb(skb)->tstamp_save;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}
		qdisc_watchdog_schedule(&q->watchdog, time_to_send);
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}
6373a9a2 671}
672
1da177e4
LT
673/*
674 * Distribution data is a variable size payload containing
675 * signed 16 bit values.
676 */
1e90474c 677static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
1da177e4
LT
678{
679 struct netem_sched_data *q = qdisc_priv(sch);
6373a9a2 680 size_t n = nla_len(attr)/sizeof(__s16);
1e90474c 681 const __s16 *data = nla_data(attr);
7698b4fc 682 spinlock_t *root_lock;
1da177e4
LT
683 struct disttable *d;
684 int i;
6373a9a2 685 size_t s;
1da177e4 686
df173bda 687 if (n > NETEM_DIST_MAX)
1da177e4
LT
688 return -EINVAL;
689
6373a9a2 690 s = sizeof(struct disttable) + n * sizeof(s16);
bb52c7ac 691 d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
6373a9a2 692 if (!d)
693 d = vmalloc(s);
1da177e4
LT
694 if (!d)
695 return -ENOMEM;
696
697 d->size = n;
698 for (i = 0; i < n; i++)
699 d->table[i] = data[i];
10297b99 700
102396ae 701 root_lock = qdisc_root_sleeping_lock(sch);
7698b4fc
DM
702
703 spin_lock_bh(root_lock);
bb52c7ac 704 swap(q->delay_dist, d);
7698b4fc 705 spin_unlock_bh(root_lock);
bb52c7ac
ED
706
707 dist_free(d);
1da177e4
LT
708 return 0;
709}
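
/* The tables themselves are built offline: iproute2 ships pre-computed
 * normal, pareto and paretonormal tables plus a maketable utility for
 * custom ones from measured data.  Each s16 entry is a deviation in units
 * of sigma/NETEM_DIST_SCALE, i.e. roughly the inverse CDF sampled at
 * uniform quantiles.  A sketch of generating a normal table (ours,
 * userspace, illustrative only):
 */
#if 0
#include <math.h>
#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE	4096
#define DIST_SCALE	8192	/* mirrors NETEM_DIST_SCALE */

int main(void)
{
	for (int i = 0; i < TABLE_SIZE; i++) {
		double p = (i + 0.5) / TABLE_SIZE;
		double lo = -10.0, hi = 10.0;

		/* invert the normal CDF by bisection on erf() */
		for (int it = 0; it < 60; it++) {
			double mid = (lo + hi) / 2;

			if (0.5 * (1 + erf(mid / sqrt(2))) < p)
				lo = mid;
			else
				hi = mid;
		}
		printf("%d\n", (int)(int16_t)lrint(lo * DIST_SCALE));
	}
	return 0;
}
#endif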

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
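
/* Compatibility note: netem's TCA_OPTIONS payload starts with the legacy
 * struct tc_netem_qopt; any newer netlink attributes are appended after it.
 * parse_attr() therefore skips NLA_ALIGN(len) bytes of fixed header before
 * parsing, rather than using plain nla_parse_nested().
 */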

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret) {
			/* recover clg and loss_model, in case
			 * q->clg and q->loss_model were modified
			 * in get_loss_clg()
			 */
			q->clg = old_clg;
			q->loss_model = old_loss_model;
			return ret;
		}
	}

	sch->limit = qopt->limit;

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");