/*
 * net/sched/sch_netem.c	Network emulator
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License.
 *
 * Many of the algorithms and ideas for this came from
 * NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.2"

/* Network Emulation Queuing algorithm.
   ====================================

   Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
	    Network Emulation Tool"
	    [2] Luigi Rizzo, DummyNet for FreeBSD

   ----------------------------------------------------------------

   This started out as a simple way to delay outgoing packets to
   test TCP but has grown to include most of the functionality
   of a full-blown network emulator like NISTnet. It can delay
   packets and add random jitter (and correlation). The random
   distribution can also be loaded from a table to provide
   normal, Pareto, or experimental curves. Packet loss,
   duplication, and reordering can also be emulated.

   This qdisc does not do classification; that can be handled by
   layering other disciplines. It does not need to do bandwidth
   control either, since that can be handled by using token
   bucket or other rate control.

   The simulator is limited by the Linux timer resolution
   and will create packet bursts on the HZ boundary (1ms).
*/
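
/*
 * Example configuration from userspace (a sketch added for
 * illustration, not part of the original source; assumes the
 * iproute2 "tc" tool and an "eth0" device):
 *
 *	# 100ms delay, 10ms jitter with 25% correlation, 0.1% loss
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 0.1%
 */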

struct netem_sched_data {
	struct Qdisc	*qdisc;		/* inner tfifo holding delayed skbs */
	struct qdisc_watchdog watchdog;

	u32 latency;	/* base delay added to each packet */
	u32 loss;	/* drop probability: 0 => none, ~0 => all */
	u32 limit;
	u32 counter;	/* packets queued since the last reorder */
	u32 gap;	/* send every gap-th packet out of order */
	u32 jitter;	/* random variation around latency */
	u32 duplicate;	/* duplication probability */
	u32 reorder;	/* reordering probability */
	u32 corrupt;	/* single-bit corruption probability */

	/* correlated pseudo-random state, one per randomized parameter */
	struct crndstate {
		unsigned long last;
		unsigned long rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	/* optional table approximating the delay distribution */
	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static unsigned long get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
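
/*
 * Worked example (added commentary, not in the original source):
 * with state->rho = 1UL << 31, i.e. 50% correlation, the blend in
 * get_crandom() reduces (up to the +1 rounding bias) to
 * answer = (value + last) / 2, an even mix of fresh entropy and
 * the previous output.
 */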

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static long tabledist(unsigned long mu, long sigma,
		      struct crndstate *state, const struct disttable *dist)
{
	long t, x;
	unsigned long rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
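
/*
 * Note (added commentary): splitting sigma into quotient and
 * remainder by NETEM_DIST_SCALE keeps the multiply from overflowing;
 * the result is effectively mu + (t * sigma) / NETEM_DIST_SCALE,
 * rounded to nearest.
 */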

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate the packet, re-insert the copy at the
	 * top of the qdisc tree, since the parent queuer expects that
	 * only one skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = sch->dev->qdisc;
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		rootq->enqueue(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
		    || (skb->ip_summed == CHECKSUM_PARTIAL
			&& skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

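		/* corrupt the packet by flipping one random bit in the
		 * linear data area */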
		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = (struct netem_skb_cb *)skb->cb;
	if (q->gap == 0			/* not doing reordering */
	    || q->counter < q->gap	/* inside last reordering gap */
	    || q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		PSCHED_GET_TIME(now);
		PSCHED_TADD2(now, delay, cb->time_to_send);
		++q->counter;
		ret = q->qdisc->enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the
		 * front of the queue.
		 */
		PSCHED_GET_TIME(cb->time_to_send);
		q->counter = 0;
		ret = q->qdisc->ops->requeue(skb, q->qdisc);
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
	} else
		sch->qstats.drops++;

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}

/* Requeue packets but don't change time stamp */
static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	return ret;
}

static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->dequeue(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb
			= (const struct netem_skb_cb *)skb->cb;
		psched_time_t now;

		/* is it time to send this packet? */
		PSCHED_GET_TIME(now);

		if (PSCHED_TLESS(cb->time_to_send, now)) {
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			sch->flags &= ~TCQ_F_THROTTLED;
			return skb;
		} else {
			qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);

			if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
				qdisc_tree_decrease_qlen(q->qdisc, 1);
				sch->qstats.drops++;
				printk(KERN_ERR "netem: queue discipline %s could not requeue\n",
				       q->qdisc->ops->id);
			}
		}
	}

	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}

/* Pass size change message down to embedded FIFO */
static int set_fifo_limit(struct Qdisc *q, int limit)
{
	struct rtattr *rta;
	int ret = -ENOMEM;

	/* Hack to avoid sending change message to non-FIFO */
	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
		return 0;

	rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
	if (rta) {
		rta->rta_type = RTM_NEWQDISC;
		rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
		((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

		ret = q->ops->change(q, rta);
		kfree(rta);
	}
	return ret;
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = RTA_PAYLOAD(attr)/sizeof(__s16);
	const __s16 *data = RTA_DATA(attr);
	struct disttable *d;
	int i;

	if (n > 65536)
		return -EINVAL;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	spin_lock_bh(&sch->dev->queue_lock);
	d = xchg(&q->delay_dist, d);
	spin_unlock_bh(&sch->dev->queue_lock);

	kfree(d);
	return 0;
}
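
/*
 * Note (added commentary): delay distribution tables are normally
 * generated offline from measured data, e.g. with the iproute2
 * "maketable" utility that produces the normal and pareto tables
 * shipped with tc.
 */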

static int get_correlation(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*c))
		return -EINVAL;

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
	return 0;
}

static int get_reorder(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
	return 0;
}

static int get_corrupt(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = RTA_DATA(opt);
	ret = set_fifo_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	q->reorder = ~0;

	/* Handle nested options after initial queue options.
	 * Should have put all options in nested format but too late now.
	 */
	if (RTA_PAYLOAD(opt) > sizeof(*qopt)) {
		struct rtattr *tb[TCA_NETEM_MAX];
		if (rtattr_parse(tb, TCA_NETEM_MAX,
				 RTA_DATA(opt) + sizeof(*qopt),
				 RTA_PAYLOAD(opt) - sizeof(*qopt)))
			return -EINVAL;

		if (tb[TCA_NETEM_CORR-1]) {
			ret = get_correlation(sch, tb[TCA_NETEM_CORR-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_DELAY_DIST-1]) {
			ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_REORDER-1]) {
			ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_CORRUPT-1]) {
			ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT-1]);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/*
 * Special case version of FIFO queue for use by netem.
 * It queues packets in order based on the timestamps in their cb area.
 */
struct fifo_sched_data {
	u32 limit;
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	const struct netem_skb_cb *ncb
		= (const struct netem_skb_cb *)nskb->cb;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb
				= (const struct netem_skb_cb *)skb->cb;

			if (!PSCHED_TLESS(ncb->time_to_send, cb->time_to_send))
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += nskb->len;
		sch->bstats.bytes += nskb->len;
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_drop(nskb, sch);
}
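
/*
 * Note (added commentary): the reverse walk above is an insertion
 * sort keyed on time_to_send. Since most packets receive the same
 * base delay, the common case finds its insertion point at the tail
 * in a single step.
 */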

static int tfifo_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = RTA_DATA(opt);
		if (RTA_PAYLOAD(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, sch->dev->tx_queue_len, 1);

	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.requeue	=	qdisc_requeue,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta = (struct rtattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	rta->rta_len = skb_tail_pointer(skb) - b;

	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			      struct rtattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int netem_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.change		=	netem_change_class,
	.delete		=	netem_delete,
	.walk		=	netem_walk,
	.tcf_chain	=	netem_find_tcf,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.requeue	=	netem_requeue,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");