[NET_SCHED]: Fix endless loops (part 2): "simple" qdiscs
[deliverable/linux.git] / net / sched / sch_netem.c
1 /*
2 * net/sched/sch_netem.c Network emulator
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License.
8 *
9 * Many of the algorithms and ideas for this came from
10 * NIST Net which is not copyrighted.
11 *
12 * Authors: Stephen Hemminger <shemminger@osdl.org>
13 * Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
14 */
15
16 #include <linux/module.h>
17 #include <linux/bitops.h>
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/errno.h>
21 #include <linux/netdevice.h>
22 #include <linux/skbuff.h>
23 #include <linux/rtnetlink.h>
24
25 #include <net/pkt_sched.h>
26
27 #define VERSION "1.2"
28
29 /* Network Emulation Queuing algorithm.
30 ====================================
31
32 Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
33 Network Emulation Tool
34 [2] Luigi Rizzo, DummyNet for FreeBSD
35
36 ----------------------------------------------------------------
37
38 This started out as a simple way to delay outgoing packets to
39 test TCP but has grown to include most of the functionality
40 of a full blown network emulator like NISTnet. It can delay
41 packets and add random jitter (and correlation). The random
42 distribution can be loaded from a table as well to provide
43 normal, Pareto, or experimental curves. Packet loss,
44 duplication, and reordering can also be emulated.
45
46 This qdisc does not do classification that can be handled in
47 layering other disciplines. It does not need to do bandwidth
48 control either since that can be handled by using token
49 bucket or other rate control.
50
51 The simulator is limited by the Linux timer resolution
52 and will create packet bursts on the HZ boundary (1ms).
53 */
54
/*
 * Per-instance state for the netem scheduler.  Probabilities are u32
 * values compared against get_crandom(): 0 => never, ~0 => always.
 */
struct netem_sched_data {
	struct Qdisc	*qdisc;		/* inner time-sorted fifo holding delayed skbs */
	struct timer_list timer;	/* watchdog: rearms tx once a delayed skb is due */

	u32 latency;	/* base delay (mu passed to tabledist) */
	u32 loss;	/* drop probability, 0 => none, ~0 => all */
	u32 limit;	/* queue length limit handed to inner fifo */
	u32 counter;	/* packets queued since last reorder event */
	u32 gap;	/* reorder one packet out of every 'gap' (0 => off) */
	u32 jitter;	/* delay variation (sigma passed to tabledist) */
	u32 duplicate;	/* duplication probability */
	u32 reorder;	/* reordering probability */
	u32 corrupt;	/* single-bit corruption probability */

	/* correlated random source, one independent state per effect */
	struct crndstate {
		unsigned long last;	/* previous output, blended into the next */
		unsigned long rho;	/* correlation coefficient, fixed point */
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	/* optional user-loaded delay distribution (inverse-CDF samples) */
	struct disttable {
		u32 size;	/* number of entries in table[] */
		s16 table[0];	/* trailing variable-length data (pre-C99 idiom) */
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;	/* earliest time this skb may leave */
};
84
85 /* init_crandom - initialize correlated random number generator
86 * Use entropy source for initial seed.
87 */
88 static void init_crandom(struct crndstate *state, unsigned long rho)
89 {
90 state->rho = rho;
91 state->last = net_random();
92 }
93
94 /* get_crandom - correlated random number generator
95 * Next number depends on last value.
96 * rho is scaled to avoid floating point.
97 */
98 static unsigned long get_crandom(struct crndstate *state)
99 {
100 u64 value, rho;
101 unsigned long answer;
102
103 if (state->rho == 0) /* no correllation */
104 return net_random();
105
106 value = net_random();
107 rho = (u64)state->rho + 1;
108 answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
109 state->last = answer;
110 return answer;
111 }
112
113 /* tabledist - return a pseudo-randomly distributed value with mean mu and
114 * std deviation sigma. Uses table lookup to approximate the desired
115 * distribution, and a uniformly-distributed pseudo-random source.
116 */
117 static long tabledist(unsigned long mu, long sigma,
118 struct crndstate *state, const struct disttable *dist)
119 {
120 long t, x;
121 unsigned long rnd;
122
123 if (sigma == 0)
124 return mu;
125
126 rnd = get_crandom(state);
127
128 /* default uniform distribution */
129 if (dist == NULL)
130 return (rnd % (2*sigma)) - sigma + mu;
131
132 t = dist->table[rnd % dist->size];
133 x = (sigma % NETEM_DIST_SCALE) * t;
134 if (x >= 0)
135 x += NETEM_DIST_SCALE/2;
136 else
137 x -= NETEM_DIST_SCALE/2;
138
139 return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
140 }
141
/*
 * Insert one skb into qdisc.
 * Applies random loss, duplication, corruption, then either a jittered
 * delay or reordering-to-front, before handing the skb to the inner queue.
 * Note: parent depends on return value to account for queue length.
 * 	NET_XMIT_DROP: queue length didn't change.
 *      NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;	/* copies to queue: 0 = drop, 2 = duplicate */

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	/* plain drop, or drop and duplicate cancelled out */
	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = sch->dev->qdisc;
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		rootq->enqueue(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make copy if needed since we are modifying
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
		    || (skb->ip_summed == CHECKSUM_PARTIAL
			&& skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		/* flip one random bit within the linear data area */
		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = (struct netem_skb_cb *)skb->cb;
	if (q->gap == 0 		/* not doing reordering */
	    || q->counter < q->gap 	/* inside last reordering gap */
	    || q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		/* normal path: stamp with now + (possibly jittered) delay */
		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		PSCHED_GET_TIME(now);
		PSCHED_TADD2(now, delay, cb->time_to_send);
		++q->counter;
		ret = q->qdisc->enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		PSCHED_GET_TIME(cb->time_to_send);
		q->counter = 0;
		ret = q->qdisc->ops->requeue(skb, q->qdisc);
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
	} else
		sch->qstats.drops++;

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}
240
241 /* Requeue packets but don't change time stamp */
242 static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
243 {
244 struct netem_sched_data *q = qdisc_priv(sch);
245 int ret;
246
247 if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
248 sch->q.qlen++;
249 sch->qstats.requeues++;
250 }
251
252 return ret;
253 }
254
255 static unsigned int netem_drop(struct Qdisc* sch)
256 {
257 struct netem_sched_data *q = qdisc_priv(sch);
258 unsigned int len = 0;
259
260 if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
261 sch->q.qlen--;
262 sch->qstats.drops++;
263 }
264 return len;
265 }
266
267 static struct sk_buff *netem_dequeue(struct Qdisc *sch)
268 {
269 struct netem_sched_data *q = qdisc_priv(sch);
270 struct sk_buff *skb;
271
272 skb = q->qdisc->dequeue(q->qdisc);
273 if (skb) {
274 const struct netem_skb_cb *cb
275 = (const struct netem_skb_cb *)skb->cb;
276 psched_time_t now;
277
278 /* if more time remaining? */
279 PSCHED_GET_TIME(now);
280
281 if (PSCHED_TLESS(cb->time_to_send, now)) {
282 pr_debug("netem_dequeue: return skb=%p\n", skb);
283 sch->q.qlen--;
284 sch->flags &= ~TCQ_F_THROTTLED;
285 return skb;
286 } else {
287 psched_tdiff_t delay = PSCHED_TDIFF(cb->time_to_send, now);
288
289 if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
290 sch->qstats.drops++;
291
292 /* After this qlen is confused */
293 printk(KERN_ERR "netem: queue discpline %s could not requeue\n",
294 q->qdisc->ops->id);
295
296 sch->q.qlen--;
297 }
298
299 mod_timer(&q->timer, jiffies + PSCHED_US2JIFFIE(delay));
300 sch->flags |= TCQ_F_THROTTLED;
301 }
302 }
303
304 return NULL;
305 }
306
/* Timer callback: a delayed packet is now due, so clear the throttle
 * flag and reschedule the device to pull packets again. */
static void netem_watchdog(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;

	pr_debug("netem_watchdog qlen=%d\n", sch->q.qlen);
	sch->flags &= ~TCQ_F_THROTTLED;
	netif_schedule(sch->dev);
}
315
/* Drop all queued packets and cancel any pending watchdog */
static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	sch->flags &= ~TCQ_F_THROTTLED;
	del_timer_sync(&q->timer);	/* waits for a running handler to finish */
}
325
326 /* Pass size change message down to embedded FIFO */
327 static int set_fifo_limit(struct Qdisc *q, int limit)
328 {
329 struct rtattr *rta;
330 int ret = -ENOMEM;
331
332 /* Hack to avoid sending change message to non-FIFO */
333 if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
334 return 0;
335
336 rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
337 if (rta) {
338 rta->rta_type = RTM_NEWQDISC;
339 rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
340 ((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;
341
342 ret = q->ops->change(q, rta);
343 kfree(rta);
344 }
345 return ret;
346 }
347
348 /*
349 * Distribution data is a variable size payload containing
350 * signed 16 bit values.
351 */
352 static int get_dist_table(struct Qdisc *sch, const struct rtattr *attr)
353 {
354 struct netem_sched_data *q = qdisc_priv(sch);
355 unsigned long n = RTA_PAYLOAD(attr)/sizeof(__s16);
356 const __s16 *data = RTA_DATA(attr);
357 struct disttable *d;
358 int i;
359
360 if (n > 65536)
361 return -EINVAL;
362
363 d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
364 if (!d)
365 return -ENOMEM;
366
367 d->size = n;
368 for (i = 0; i < n; i++)
369 d->table[i] = data[i];
370
371 spin_lock_bh(&sch->dev->queue_lock);
372 d = xchg(&q->delay_dist, d);
373 spin_unlock_bh(&sch->dev->queue_lock);
374
375 kfree(d);
376 return 0;
377 }
378
379 static int get_correlation(struct Qdisc *sch, const struct rtattr *attr)
380 {
381 struct netem_sched_data *q = qdisc_priv(sch);
382 const struct tc_netem_corr *c = RTA_DATA(attr);
383
384 if (RTA_PAYLOAD(attr) != sizeof(*c))
385 return -EINVAL;
386
387 init_crandom(&q->delay_cor, c->delay_corr);
388 init_crandom(&q->loss_cor, c->loss_corr);
389 init_crandom(&q->dup_cor, c->dup_corr);
390 return 0;
391 }
392
393 static int get_reorder(struct Qdisc *sch, const struct rtattr *attr)
394 {
395 struct netem_sched_data *q = qdisc_priv(sch);
396 const struct tc_netem_reorder *r = RTA_DATA(attr);
397
398 if (RTA_PAYLOAD(attr) != sizeof(*r))
399 return -EINVAL;
400
401 q->reorder = r->probability;
402 init_crandom(&q->reorder_cor, r->correlation);
403 return 0;
404 }
405
406 static int get_corrupt(struct Qdisc *sch, const struct rtattr *attr)
407 {
408 struct netem_sched_data *q = qdisc_priv(sch);
409 const struct tc_netem_corrupt *r = RTA_DATA(attr);
410
411 if (RTA_PAYLOAD(attr) != sizeof(*r))
412 return -EINVAL;
413
414 q->corrupt = r->probability;
415 init_crandom(&q->corrupt_cor, r->correlation);
416 return 0;
417 }
418
/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = RTA_DATA(opt);
	/* resize the embedded fifo before taking the new parameters */
	ret = set_fifo_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 * (a TCA_NETEM_REORDER attribute below may override this)
	 */
	q->reorder = ~0;

	/* Handle nested options after initial queue options.
	 * Should have put all options in nested format but too late now.
	 */
	if (RTA_PAYLOAD(opt) > sizeof(*qopt)) {
		struct rtattr *tb[TCA_NETEM_MAX];
		if (rtattr_parse(tb, TCA_NETEM_MAX,
				 RTA_DATA(opt) + sizeof(*qopt),
				 RTA_PAYLOAD(opt) - sizeof(*qopt)))
			return -EINVAL;

		if (tb[TCA_NETEM_CORR-1]) {
			ret = get_correlation(sch, tb[TCA_NETEM_CORR-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_DELAY_DIST-1]) {
			ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_REORDER-1]) {
			ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_CORRUPT-1]) {
			ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT-1]);
			if (ret)
				return ret;
		}
	}

	return 0;
}
486
/*
 * Special case version of FIFO queue for use by netem.
 * It queues in order based on timestamps in skb's
 */
struct fifo_sched_data {
	u32 limit;	/* maximum queue length in packets */
};
494
/*
 * Enqueue into the time-sorted fifo: walk backwards from the tail and
 * insert nskb after the first packet whose stamp is not later than
 * nskb's, keeping the queue ordered by time_to_send.
 */
static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	const struct netem_skb_cb *ncb
		= (const struct netem_skb_cb *)nskb->cb;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb
				= (const struct netem_skb_cb *)skb->cb;

			/* stop at the first packet not due after nskb */
			if (!PSCHED_TLESS(ncb->time_to_send, cb->time_to_send))
				break;
		}

		/* NOTE(review): if the walk finds no such packet, skb is
		 * presumably left at the list head sentinel, so nskb is
		 * inserted at the front — matches skb_queue_reverse_walk
		 * semantics; confirm against skbuff.h. */
		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += nskb->len;
		sch->bstats.bytes += nskb->len;
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	/* over the configured limit: drop and account */
	return qdisc_drop(nskb, sch);
}
523
524 static int tfifo_init(struct Qdisc *sch, struct rtattr *opt)
525 {
526 struct fifo_sched_data *q = qdisc_priv(sch);
527
528 if (opt) {
529 struct tc_fifo_qopt *ctl = RTA_DATA(opt);
530 if (RTA_PAYLOAD(opt) < sizeof(*ctl))
531 return -EINVAL;
532
533 q->limit = ctl->limit;
534 } else
535 q->limit = max_t(u32, sch->dev->tx_queue_len, 1);
536
537 return 0;
538 }
539
/* Dump the fifo limit back to userspace as a TCA_OPTIONS attribute */
static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	/* RTA_PUT jumps to rtattr_failure when skb runs out of room */
	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	return -1;
}
551
/* Internal tfifo qdisc: never passed to register_qdisc() here, used
 * only as netem's default inner queue. */
static struct Qdisc_ops tfifo_qdisc_ops = {
	.id = "tfifo",
	.priv_size = sizeof(struct fifo_sched_data),
	.enqueue = tfifo_enqueue,	/* keeps queue sorted by time_to_send */
	.dequeue = qdisc_dequeue_head,	/* head = earliest stamp */
	.requeue = qdisc_requeue,
	.drop = qdisc_queue_drop,
	.init = tfifo_init,
	.reset = qdisc_reset_queue,
	.change = tfifo_init,	/* change re-runs the same option parsing */
	.dump = tfifo_dump,
};
564
/* Create a netem instance: set up the watchdog timer and the inner
 * tfifo, then apply the user's options via netem_change(). */
static int netem_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	/* netem is pointless without configuration */
	if (!opt)
		return -EINVAL;

	init_timer(&q->timer);
	q->timer.function = netem_watchdog;
	q->timer.data = (unsigned long) sch;

	/* inner queue ordered on cb->time_to_send */
	q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		/* undo the inner qdisc on failure */
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}
591
/* Tear down: stop the pending timer, then free the inner qdisc and
 * the optional delay distribution table. */
static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	del_timer_sync(&q->timer);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);	/* may be NULL when no table was loaded */
}
600
/* Dump configuration into a netlink message: the flat qopt first, then
 * the correlation / reorder / corrupt attributes, all wrapped in a
 * single TCA_OPTIONS rtattr whose length is patched at the end. */
static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb->tail;	/* start of our attribute block */
	struct rtattr *rta = (struct rtattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	/* fix up total length now that all attributes are in */
	rta->rta_len = skb->tail - b;

	return skb->len;

rtattr_failure:
	/* out of skb space: roll back the partial dump */
	skb_trim(skb, b - skb->data);
	return -1;
}
640
/* Dump the single pseudo-class (id 1), reporting the inner qdisc */
static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1) 	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}
654
/* Replace the inner qdisc with 'new', returning the previous one in
 * *old.  All swapping is done under the qdisc tree lock. */
static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;	/* detach: discard everything */

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	/* propagate the old queue's length up the tree, then flush it */
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}
671
/* netem exposes exactly one pseudo-class (id 1): the inner qdisc */
static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;	/* the single class id; no reference counting */
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
	/* nothing to release: netem_get() takes no reference */
}

/* Classes cannot be created, changed or deleted on netem */
static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			      struct rtattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int netem_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}
697
698 static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
699 {
700 if (!walker->stop) {
701 if (walker->count >= walker->skip)
702 if (walker->fn(sch, 1, walker) < 0) {
703 walker->stop = 1;
704 return;
705 }
706 walker->count++;
707 }
708 }
709
/* netem does no classification, so there is no filter chain */
static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}
714
/* Class operations: present the inner qdisc as a single class */
static struct Qdisc_class_ops netem_class_ops = {
	.graft = netem_graft,
	.leaf = netem_leaf,
	.get = netem_get,
	.put = netem_put,
	.change = netem_change_class,	/* always -ENOSYS */
	.delete = netem_delete,		/* always -ENOSYS */
	.walk = netem_walk,
	.tcf_chain = netem_find_tcf,	/* no filters */
	.dump = netem_dump_class,
};
726
/* The netem qdisc proper, registered at module load */
static struct Qdisc_ops netem_qdisc_ops = {
	.id = "netem",
	.cl_ops = &netem_class_ops,
	.priv_size = sizeof(struct netem_sched_data),
	.enqueue = netem_enqueue,
	.dequeue = netem_dequeue,
	.requeue = netem_requeue,
	.drop = netem_drop,
	.init = netem_init,
	.reset = netem_reset,
	.destroy = netem_destroy,
	.change = netem_change,
	.dump = netem_dump,
	.owner = THIS_MODULE,
};
742
743
/* Module entry/exit: register and unregister the netem qdisc ops */
static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");
This page took 0.174448 seconds and 5 git commands to generate.