/*
 * net/sched/sch_netem.c        Network emulator
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License.
 *
 *              Many of the algorithms and ideas for this came from
 *              NIST Net which is not copyrighted.
 *
 * Authors:     Stephen Hemminger <shemminger@osdl.org>
 *              Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.2"

/*      Network Emulation Queuing algorithm.
        ====================================

        Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
                     Network Emulation Tool"
                 [2] Luigi Rizzo, DummyNet for FreeBSD

        ----------------------------------------------------------------

        This started out as a simple way to delay outgoing packets to
        test TCP but has grown to include most of the functionality
        of a full blown network emulator like NISTnet.  It can delay
        packets and add random jitter (and correlation).  The random
        distribution can be loaded from a table as well to provide
        normal, Pareto, or experimental curves.  Packet loss,
        duplication, and reordering can also be emulated.

        This qdisc does not do classification; that can be handled by
        layering other disciplines on top of it.  It does not need to do
        bandwidth control either, since that can be handled by using a
        token bucket or other rate control.

        The simulator is limited by the Linux timer resolution
        and will create packet bursts on the HZ boundary (1ms).
*/
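
/* Illustrative userspace example only (exact syntax belongs to the iproute2
 * tc-netem man page, not to this file):
 *
 *      tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 0.1%
 *
 * would delay packets by 100ms +/- 10ms with 25% correlation between
 * successive delays, and drop 0.1% of packets.
 */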

struct netem_sched_data {
        struct Qdisc *qdisc;
        struct qdisc_watchdog watchdog;

        psched_tdiff_t latency;
        psched_tdiff_t jitter;

        u32 loss;
        u32 limit;
        u32 counter;
        u32 gap;
        u32 duplicate;
        u32 reorder;
        u32 corrupt;

        struct crndstate {
                u32 last;
                u32 rho;
        } delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

        struct disttable {
                u32 size;
                s16 table[0];
        } *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
        psched_time_t time_to_send;
};

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
        state->rho = rho;
        state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
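/*
 * The update below is, in 32.32 fixed point,
 *
 *      answer = value * (1 - rho') + last * rho',  rho' = (rho + 1) / 2^32
 *
 * i.e. an exponentially weighted mix of fresh entropy and the previous
 * output.  Illustrative numbers: rho = 0xc0000000 gives rho' ~= 0.75, so
 * answer ~= 0.25 * value + 0.75 * last.
 */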
static u32 get_crandom(struct crndstate *state)
{
        u64 value, rho;
        unsigned long answer;

        if (state->rho == 0)    /* no correlation */
                return net_random();

        value = net_random();
        rho = (u64)state->rho + 1;
        answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
        state->last = answer;
        return answer;
}

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
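/*
 * The result is approximately mu + t * sigma / NETEM_DIST_SCALE, where t
 * is a signed table entry picked uniformly at random; an entry equal to
 * NETEM_DIST_SCALE therefore lands one standard deviation above the mean.
 * The computation below splits sigma into quotient and remainder of
 * NETEM_DIST_SCALE so the fractional part is rounded rather than lost to
 * integer division.
 */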
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
                                struct crndstate *state,
                                const struct disttable *dist)
{
        psched_tdiff_t x;
        long t;
        u32 rnd;

        if (sigma == 0)
                return mu;

        rnd = get_crandom(state);

        /* default uniform distribution */
        if (dist == NULL)
                return (rnd % (2*sigma)) - sigma + mu;

        t = dist->table[rnd % dist->size];
        x = (sigma % NETEM_DIST_SCALE) * t;
        if (x >= 0)
                x += NETEM_DIST_SCALE/2;
        else
                x -= NETEM_DIST_SCALE/2;

        return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *      NET_XMIT_DROP: queue length didn't change.
 *      NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct netem_sched_data *q = qdisc_priv(sch);
        /* We don't fill cb now as skb_unshare() may invalidate it */
        struct netem_skb_cb *cb;
        struct sk_buff *skb2;
        int ret;
        int count = 1;

        pr_debug("netem_enqueue skb=%p\n", skb);

        /* Random duplication */
        if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
                ++count;

        /* Random packet drop 0 => none, ~0 => all */
        if (q->loss && q->loss >= get_crandom(&q->loss_cor))
                --count;

        if (count == 0) {
                sch->qstats.drops++;
                kfree_skb(skb);
                return NET_XMIT_BYPASS;
        }

        skb_orphan(skb);

        /*
         * If we need to duplicate packet, then re-insert at top of the
         * qdisc tree, since parent queuer expects that only one
         * skb will be queued.
         */
        if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
                struct Qdisc *rootq = sch->dev->qdisc;
                u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
                q->duplicate = 0;

                rootq->enqueue(skb2, rootq);
                q->duplicate = dupsave;
        }

        /*
         * Randomized packet corruption.
         * Make a copy if needed, since we are modifying the data.
         * If the packet is going to be hardware checksummed, then
         * do it now in software before we mangle it.
         */
        if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
                if (!(skb = skb_unshare(skb, GFP_ATOMIC))
                    || (skb->ip_summed == CHECKSUM_PARTIAL
                        && skb_checksum_help(skb))) {
                        sch->qstats.drops++;
                        return NET_XMIT_DROP;
                }

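                /* Flip one random bit in a random byte of the linear
                 * (headlen) portion of the packet. */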
                skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
        }

        cb = (struct netem_skb_cb *)skb->cb;
        if (q->gap == 0                 /* not doing reordering */
            || q->counter < q->gap      /* inside last reordering gap */
            || q->reorder < get_crandom(&q->reorder_cor)) {
                psched_time_t now;
                psched_tdiff_t delay;

                delay = tabledist(q->latency, q->jitter,
                                  &q->delay_cor, q->delay_dist);

                now = psched_get_time();
                cb->time_to_send = now + delay;
                ++q->counter;
                ret = q->qdisc->enqueue(skb, q->qdisc);
        } else {
                /*
                 * Do re-ordering by putting one out of N packets at the front
                 * of the queue.
                 */
                cb->time_to_send = psched_get_time();
                q->counter = 0;
                ret = q->qdisc->ops->requeue(skb, q->qdisc);
        }

        if (likely(ret == NET_XMIT_SUCCESS)) {
                sch->q.qlen++;
                sch->bstats.bytes += skb->len;
                sch->bstats.packets++;
        } else
                sch->qstats.drops++;

        pr_debug("netem: enqueue ret %d\n", ret);
        return ret;
}

/* Requeue packets but don't change time stamp */
static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct netem_sched_data *q = qdisc_priv(sch);
        int ret;

        if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
                sch->q.qlen++;
                sch->qstats.requeues++;
        }

        return ret;
}

static unsigned int netem_drop(struct Qdisc *sch)
{
        struct netem_sched_data *q = qdisc_priv(sch);
        unsigned int len = 0;

        if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
                sch->q.qlen--;
                sch->qstats.drops++;
        }
        return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
        struct netem_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;

        smp_mb();
        if (sch->flags & TCQ_F_THROTTLED)
                return NULL;

        skb = q->qdisc->dequeue(q->qdisc);
        if (skb) {
                const struct netem_skb_cb *cb
                        = (const struct netem_skb_cb *)skb->cb;
                psched_time_t now = psched_get_time();

                /* is it time to send this packet? */
                if (cb->time_to_send <= now) {
                        pr_debug("netem_dequeue: return skb=%p\n", skb);
                        sch->q.qlen--;
                        return skb;
                }

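                /* Not due yet: put the skb back into the inner qdisc and
                 * arm the watchdog to try again at its time_to_send. */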
                if (unlikely(q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS)) {
                        qdisc_tree_decrease_qlen(q->qdisc, 1);
                        sch->qstats.drops++;
                        printk(KERN_ERR "netem: %s could not requeue\n",
                               q->qdisc->ops->id);
                }

                qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
        }

        return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
        struct netem_sched_data *q = qdisc_priv(sch);

        qdisc_reset(q->qdisc);
        sch->q.qlen = 0;
        qdisc_watchdog_cancel(&q->watchdog);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
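/*
 * Each table entry t is later interpreted by tabledist() as a deviation
 * of roughly t * sigma / NETEM_DIST_SCALE from the mean.  The table is
 * supplied from userspace via the TCA_NETEM_DELAY_DIST attribute
 * (userspace tools such as iproute2 ship precomputed normal/pareto
 * tables for this purpose).
 */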
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
        struct netem_sched_data *q = qdisc_priv(sch);
        unsigned long n = nla_len(attr)/sizeof(__s16);
        const __s16 *data = nla_data(attr);
        struct disttable *d;
        int i;

        if (n > 65536)
                return -EINVAL;

        d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
        if (!d)
                return -ENOMEM;

        d->size = n;
        for (i = 0; i < n; i++)
                d->table[i] = data[i];

        spin_lock_bh(&sch->dev->queue_lock);
        d = xchg(&q->delay_dist, d);
        spin_unlock_bh(&sch->dev->queue_lock);

        kfree(d);
        return 0;
}

static int get_correlation(struct Qdisc *sch, const struct nlattr *attr)
{
        struct netem_sched_data *q = qdisc_priv(sch);
        const struct tc_netem_corr *c = nla_data(attr);

        init_crandom(&q->delay_cor, c->delay_corr);
        init_crandom(&q->loss_cor, c->loss_corr);
        init_crandom(&q->dup_cor, c->dup_corr);
        return 0;
}

static int get_reorder(struct Qdisc *sch, const struct nlattr *attr)
{
        struct netem_sched_data *q = qdisc_priv(sch);
        const struct tc_netem_reorder *r = nla_data(attr);

        q->reorder = r->probability;
        init_crandom(&q->reorder_cor, r->correlation);
        return 0;
}

static int get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
{
        struct netem_sched_data *q = qdisc_priv(sch);
        const struct tc_netem_corrupt *r = nla_data(attr);

        q->corrupt = r->probability;
        init_crandom(&q->corrupt_cor, r->correlation);
        return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
        [TCA_NETEM_CORR]        = { .len = sizeof(struct tc_netem_corr) },
        [TCA_NETEM_REORDER]     = { .len = sizeof(struct tc_netem_reorder) },
        [TCA_NETEM_CORRUPT]     = { .len = sizeof(struct tc_netem_corrupt) },
};

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
        struct netem_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_NETEM_MAX + 1];
        struct tc_netem_qopt *qopt;
        int ret;

        if (opt == NULL)
                return -EINVAL;

        ret = nla_parse_nested_compat(tb, TCA_NETEM_MAX, opt, netem_policy,
                                      qopt, sizeof(*qopt));
        if (ret < 0)
                return ret;

        ret = fifo_set_limit(q->qdisc, qopt->limit);
        if (ret) {
                pr_debug("netem: can't set fifo limit\n");
                return ret;
        }

        q->latency = qopt->latency;
        q->jitter = qopt->jitter;
        q->limit = qopt->limit;
        q->gap = qopt->gap;
        q->counter = 0;
        q->loss = qopt->loss;
        q->duplicate = qopt->duplicate;

        /* for compatibility with earlier versions.
         * if gap is set, need to assume 100% probability
         */
        if (q->gap)
                q->reorder = ~0;

        if (tb[TCA_NETEM_CORR]) {
                ret = get_correlation(sch, tb[TCA_NETEM_CORR]);
                if (ret)
                        return ret;
        }

        if (tb[TCA_NETEM_DELAY_DIST]) {
                ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
                if (ret)
                        return ret;
        }

        if (tb[TCA_NETEM_REORDER]) {
                ret = get_reorder(sch, tb[TCA_NETEM_REORDER]);
                if (ret)
                        return ret;
        }

        if (tb[TCA_NETEM_CORRUPT]) {
                ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);
                if (ret)
                        return ret;
        }

        return 0;
}

/*
 * Special case version of FIFO queue for use by netem.
 * It queues in order based on timestamps in skb's
 */
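/*
 * tfifo_enqueue() keeps the queue sorted by each skb's time_to_send: in
 * the common case a new packet is due after everything already queued and
 * is simply appended, otherwise the queue is walked from the tail to find
 * the insertion point.  The head is therefore always the packet that is
 * due soonest, which is all netem_dequeue() ever looks at.
 */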
struct fifo_sched_data {
        u32 limit;
        psched_time_t oldest;
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
        struct fifo_sched_data *q = qdisc_priv(sch);
        struct sk_buff_head *list = &sch->q;
        psched_time_t tnext = ((struct netem_skb_cb *)nskb->cb)->time_to_send;
        struct sk_buff *skb;

        if (likely(skb_queue_len(list) < q->limit)) {
                /* Optimize for add at tail */
                if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
                        q->oldest = tnext;
                        return qdisc_enqueue_tail(nskb, sch);
                }

                skb_queue_reverse_walk(list, skb) {
                        const struct netem_skb_cb *cb
                                = (const struct netem_skb_cb *)skb->cb;

                        if (tnext >= cb->time_to_send)
                                break;
                }

                __skb_queue_after(list, skb, nskb);

                sch->qstats.backlog += nskb->len;
                sch->bstats.bytes += nskb->len;
                sch->bstats.packets++;

                return NET_XMIT_SUCCESS;
        }

        return qdisc_reshape_fail(nskb, sch);
}

static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct fifo_sched_data *q = qdisc_priv(sch);

        if (opt) {
                struct tc_fifo_qopt *ctl = nla_data(opt);
                if (nla_len(opt) < sizeof(*ctl))
                        return -EINVAL;

                q->limit = ctl->limit;
        } else
                q->limit = max_t(u32, sch->dev->tx_queue_len, 1);

        q->oldest = PSCHED_PASTPERFECT;
        return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct fifo_sched_data *q = qdisc_priv(sch);
        struct tc_fifo_qopt opt = { .limit = q->limit };

        NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
        return skb->len;

nla_put_failure:
        return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
        .id             =       "tfifo",
        .priv_size      =       sizeof(struct fifo_sched_data),
        .enqueue        =       tfifo_enqueue,
        .dequeue        =       qdisc_dequeue_head,
        .requeue        =       qdisc_requeue,
        .drop           =       qdisc_queue_drop,
        .init           =       tfifo_init,
        .reset          =       qdisc_reset_queue,
        .change         =       tfifo_init,
        .dump           =       tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct netem_sched_data *q = qdisc_priv(sch);
        int ret;

        if (!opt)
                return -EINVAL;

        qdisc_watchdog_init(&q->watchdog, sch);

        q->qdisc = qdisc_create_dflt(sch->dev, sch->dev_queue,
                                     &tfifo_qdisc_ops,
                                     TC_H_MAKE(sch->handle, 1));
        if (!q->qdisc) {
                pr_debug("netem: qdisc create failed\n");
                return -ENOMEM;
        }

        ret = netem_change(sch, opt);
        if (ret) {
                pr_debug("netem: change failed\n");
                qdisc_destroy(q->qdisc);
        }
        return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
        struct netem_sched_data *q = qdisc_priv(sch);

        qdisc_watchdog_cancel(&q->watchdog);
        qdisc_destroy(q->qdisc);
        kfree(q->delay_dist);
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        const struct netem_sched_data *q = qdisc_priv(sch);
        unsigned char *b = skb_tail_pointer(skb);
        struct nlattr *nla = (struct nlattr *) b;
        struct tc_netem_qopt qopt;
        struct tc_netem_corr cor;
        struct tc_netem_reorder reorder;
        struct tc_netem_corrupt corrupt;

        qopt.latency = q->latency;
        qopt.jitter = q->jitter;
        qopt.limit = q->limit;
        qopt.loss = q->loss;
        qopt.gap = q->gap;
        qopt.duplicate = q->duplicate;
        NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

        cor.delay_corr = q->delay_cor.rho;
        cor.loss_corr = q->loss_cor.rho;
        cor.dup_corr = q->dup_cor.rho;
        NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

        reorder.probability = q->reorder;
        reorder.correlation = q->reorder_cor.rho;
        NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

        corrupt.probability = q->corrupt;
        corrupt.correlation = q->corrupt_cor.rho;
        NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

        nla->nla_len = skb_tail_pointer(skb) - b;

        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
                            struct sk_buff *skb, struct tcmsg *tcm)
{
        struct netem_sched_data *q = qdisc_priv(sch);

        if (cl != 1)    /* only one class */
                return -ENOENT;

        tcm->tcm_handle |= TC_H_MIN(1);
        tcm->tcm_info = q->qdisc->handle;

        return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                       struct Qdisc **old)
{
        struct netem_sched_data *q = qdisc_priv(sch);

        if (new == NULL)
                new = &noop_qdisc;

        sch_tree_lock(sch);
        *old = xchg(&q->qdisc, new);
        qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
        qdisc_reset(*old);
        sch_tree_unlock(sch);

        return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct netem_sched_data *q = qdisc_priv(sch);
        return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
        return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                              struct nlattr **tca, unsigned long *arg)
{
        return -ENOSYS;
}

static int netem_delete(struct Qdisc *sch, unsigned long arg)
{
        return -ENOSYS;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
        if (!walker->stop) {
                if (walker->count >= walker->skip)
                        if (walker->fn(sch, 1, walker) < 0) {
                                walker->stop = 1;
                                return;
                        }
                walker->count++;
        }
}

static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
{
        return NULL;
}

static const struct Qdisc_class_ops netem_class_ops = {
        .graft          =       netem_graft,
        .leaf           =       netem_leaf,
        .get            =       netem_get,
        .put            =       netem_put,
        .change         =       netem_change_class,
        .delete         =       netem_delete,
        .walk           =       netem_walk,
        .tcf_chain      =       netem_find_tcf,
        .dump           =       netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
        .id             =       "netem",
        .cl_ops         =       &netem_class_ops,
        .priv_size      =       sizeof(struct netem_sched_data),
        .enqueue        =       netem_enqueue,
        .dequeue        =       netem_dequeue,
        .requeue        =       netem_requeue,
        .drop           =       netem_drop,
        .init           =       netem_init,
        .reset          =       netem_reset,
        .destroy        =       netem_destroy,
        .change         =       netem_change,
        .dump           =       netem_dump,
        .owner          =       THIS_MODULE,
};


static int __init netem_module_init(void)
{
        pr_info("netem: version " VERSION "\n");
        return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
        unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");