/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.2"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet. It can delay
	packets and add random jitter (and correlation). The random
	distribution can be loaded from a table as well to provide
	normal, Pareto, or experimental curves. Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines. It does not need to do bandwidth
	control either, since that can be handled by using a token
	bucket or other rate control.

	The emulator is limited by the Linux timer resolution
	and will create packet bursts on the HZ boundary (1ms).
*/
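
/*
 * Illustrative usage (not part of the original file): the emulator is
 * normally configured from user space with the iproute2 tc(8) frontend.
 * The device name and numbers below are placeholders, e.g.:
 *
 *	# 100ms delay with 10ms jitter (25% correlated) and 0.1% loss
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 0.1%
 *
 *	# send one packet in five immediately, reordering it ahead of
 *	# the delayed ones
 *	tc qdisc change dev eth0 root netem delay 10ms reorder 25% gap 5
 */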

struct netem_sched_data {
	struct Qdisc	*qdisc;
	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
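
/*
 * Illustrative note (not in the original source): rho is a probability
 * scaled to the full 32-bit range, so get_crandom() above computes, in
 * fixed point,
 *
 *	answer = (1 - rho) * uniform + rho * last
 *
 * e.g. rho = 0x80000000 (~50%) weights fresh entropy and the previous
 * value about equally, producing a slowly drifting sequence rather than
 * independent draws.
 */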

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
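
/*
 * Illustrative note (not in the original source): the table entry t is a
 * sample from the requested distribution scaled by NETEM_DIST_SCALE, so
 * the return value above works out to roughly
 *
 *	mu + t * sigma / NETEM_DIST_SCALE
 *
 * computed in quotient and remainder parts (with rounding) so that the
 * intermediate product does not overflow for large sigma.
 */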

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = sch->dev->qdisc;
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		rootq->enqueue(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the packet.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
		    || (skb->ip_summed == CHECKSUM_PARTIAL
			&& skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = (struct netem_skb_cb *)skb->cb;
	if (q->gap == 0			/* not doing reordering */
	    || q->counter < q->gap	/* inside last reordering gap */
	    || q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		PSCHED_GET_TIME(now);
		PSCHED_TADD2(now, delay, cb->time_to_send);
		++q->counter;
		ret = q->qdisc->enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		PSCHED_GET_TIME(cb->time_to_send);
		q->counter = 0;
		ret = q->qdisc->ops->requeue(skb, q->qdisc);
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
	} else
		sch->qstats.drops++;

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}

/* Requeue packets but don't change time stamp */
static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	return ret;
}

static unsigned int netem_drop(struct Qdisc* sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

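/*
 * Dequeue pulls the earliest packet from the inner queue, hands it out
 * if its scheduled send time has been reached, and otherwise puts it
 * back and arms the watchdog so the qdisc is woken again at
 * time_to_send.
 */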
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	smp_mb();
	if (sch->flags & TCQ_F_THROTTLED)
		return NULL;

	skb = q->qdisc->dequeue(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb
			= (const struct netem_skb_cb *)skb->cb;
		psched_time_t now;

		/* has the packet's send time been reached yet? */
		PSCHED_GET_TIME(now);

		if (PSCHED_TLESS(cb->time_to_send, now)) {
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			return skb;
		}

		if (unlikely(q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS)) {
			qdisc_tree_decrease_qlen(q->qdisc, 1);
			sch->qstats.drops++;
			printk(KERN_ERR "netem: %s could not requeue\n",
			       q->qdisc->ops->id);
		}

		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}

/* Pass size change message down to embedded FIFO */
static int set_fifo_limit(struct Qdisc *q, int limit)
{
	struct rtattr *rta;
	int ret = -ENOMEM;

	/* Hack to avoid sending change message to non-FIFO */
	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
		return 0;

	rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
	if (rta) {
		rta->rta_type = RTM_NEWQDISC;
		rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
		((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

		ret = q->ops->change(q, rta);
		kfree(rta);
	}
	return ret;
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
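/*
 * Illustrative note (assumption, not stated in this file): such tables
 * are typically generated in user space, e.g. with the iproute2
 * "maketable" helper, from measured or synthetic delay data, and passed
 * down as the TCA_NETEM_DELAY_DIST attribute.
 */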
static int get_dist_table(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = RTA_PAYLOAD(attr)/sizeof(__s16);
	const __s16 *data = RTA_DATA(attr);
	struct disttable *d;
	int i;

	if (n > 65536)
		return -EINVAL;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	spin_lock_bh(&sch->dev->queue_lock);
	d = xchg(&q->delay_dist, d);
	spin_unlock_bh(&sch->dev->queue_lock);

	kfree(d);
	return 0;
}

static int get_correlation(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*c))
		return -EINVAL;

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
	return 0;
}

static int get_reorder(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
	return 0;
}

static int get_corrupt(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
	return 0;
}

/* Parse netlink message to set options */
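/*
 * Layout note (derived from the code below): the TCA_OPTIONS payload
 * starts with a fixed struct tc_netem_qopt, optionally followed by
 * nested TCA_NETEM_CORR, TCA_NETEM_DELAY_DIST, TCA_NETEM_REORDER and
 * TCA_NETEM_CORRUPT attributes.
 */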
static int netem_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = RTA_DATA(opt);
	ret = set_fifo_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	/* Handle nested options after initial queue options.
	 * Should have put all options in nested format but too late now.
	 */
	if (RTA_PAYLOAD(opt) > sizeof(*qopt)) {
		struct rtattr *tb[TCA_NETEM_MAX];
		if (rtattr_parse(tb, TCA_NETEM_MAX,
				 RTA_DATA(opt) + sizeof(*qopt),
				 RTA_PAYLOAD(opt) - sizeof(*qopt)))
			return -EINVAL;

		if (tb[TCA_NETEM_CORR-1]) {
			ret = get_correlation(sch, tb[TCA_NETEM_CORR-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_DELAY_DIST-1]) {
			ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_REORDER-1]) {
			ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_CORRUPT-1]) {
			ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT-1]);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/*
 * Special case version of FIFO queue for use by netem.
 * It queues packets in order based on the time stamps in their
 * control blocks (netem_skb_cb).
 */
struct fifo_sched_data {
	u32 limit;
	psched_time_t oldest;
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = ((struct netem_skb_cb *)nskb->cb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		/* Optimize for add at tail */
		if (likely(skb_queue_empty(list) || !PSCHED_TLESS(tnext, q->oldest))) {
			q->oldest = tnext;
			return qdisc_enqueue_tail(nskb, sch);
		}

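		/*
		 * Otherwise walk back from the tail to find the first
		 * packet scheduled no later than nskb, and insert after
		 * it to keep the list ordered by time_to_send.
		 */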
		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb
				= (const struct netem_skb_cb *)skb->cb;

			if (!PSCHED_TLESS(tnext, cb->time_to_send))
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += nskb->len;
		sch->bstats.bytes += nskb->len;
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}

static int tfifo_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = RTA_DATA(opt);
		if (RTA_PAYLOAD(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, sch->dev->tx_queue_len, 1);

	PSCHED_SET_PASTPERFECT(q->oldest);
	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.requeue	=	qdisc_requeue,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}

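/*
 * Dump the current configuration: the fixed tc_netem_qopt followed by
 * the correlation, reorder and corrupt attributes, mirroring the layout
 * parsed by netem_change().
 */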
static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta = (struct rtattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	rta->rta_len = skb_tail_pointer(skb) - b;

	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			      struct rtattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int netem_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.change		=	netem_change_class,
	.delete		=	netem_delete,
	.walk		=	netem_walk,
	.tcf_chain	=	netem_find_tcf,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.requeue	=	netem_requeue,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");