/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet. It can delay
	packets and add random jitter (and correlation). The random
	distribution can be loaded from a table as well to provide
	normal, Pareto, or experimental curves. Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification that can be handled in
	layering other disciplines.  It does not need to do bandwidth
	control either since that can be handled by using token
	bucket or other rate control.

	Correlated Loss Generator models

	Added generation of correlated loss according to the
	"Gilbert-Elliot" model, a 4-state markov model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/

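/* Example (via the iproute2 "tc" front end; illustrative only):
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 1%
 *
 * adds 100ms of base delay with 10ms of 25%-correlated jitter plus 1%
 * random packet loss on eth0.
 */
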
struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u32 rate;
	s32 packet_overhead;
	u32 cell_size;
	u32 cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 */
struct netem_skb_cb {
	psched_time_t	time_to_send;
	ktime_t		tstamp_save;
};

/* Because space in skb->cb[] is tight, netem overloads skb->next/prev/tstamp
 * to hold a rb_node structure.
 *
 * If struct sk_buff layout is changed, the following checks will complain.
 */
static struct rb_node *netem_rb_node(struct sk_buff *skb)
{
	BUILD_BUG_ON(offsetof(struct sk_buff, next) != 0);
	BUILD_BUG_ON(offsetof(struct sk_buff, prev) !=
		     offsetof(struct sk_buff, next) + sizeof(skb->next));
	BUILD_BUG_ON(offsetof(struct sk_buff, tstamp) !=
		     offsetof(struct sk_buff, prev) + sizeof(skb->prev));
	BUILD_BUG_ON(sizeof(struct rb_node) > sizeof(skb->next) +
					      sizeof(skb->prev) +
					      sizeof(skb->tstamp));
	return (struct rb_node *)&skb->next;
}

static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
{
	return (struct sk_buff *)rb;
}

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
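/* The recurrence below, with rho scaled to 0..2^32-1 and rho' = rho + 1, is
 *
 *	answer = ((2^32 - rho') * U + rho' * last) >> 32
 *
 * i.e. a weighted average of a fresh uniform draw U and the previous
 * output, so consecutive samples are positively correlated.
 */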
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = net_random();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   1 => successfully transmitted packets within a gap period
	 *   4 => isolated losses within a gap period
	 *   3 => lost packets within a burst period
	 *   2 => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case 1:
		if (rnd < clg->a4) {
			clg->state = 4;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1) {
			clg->state = 3;
			return true;
		} else if (clg->a1 < rnd)
			clg->state = 1;

		break;
	case 2:
		if (rnd < clg->a5) {
			clg->state = 3;
			return true;
		} else
			clg->state = 2;

		break;
	case 3:
		if (rnd < clg->a3)
			clg->state = 2;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = 1;
			return true;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = 3;
			return true;
		}
		break;
	case 4:
		clg->state = 1;
		break;
	}

	return false;
}

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case 1:
		if (net_random() < clg->a1)
			clg->state = 2;
		if (net_random() < clg->a4)
			return true;
		break;	/* don't fall through into the bad-state checks */
	case 2:
		if (net_random() < clg->a2)
			clg->state = 1;
		if (clg->a3 > net_random())
			return true;
	}

	return false;
}

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4state loss model algorithm (used also for GI model)
		 * Extracts a value from the markov 4 state loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
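/* The table path below effectively computes mu + sigma * t / NETEM_DIST_SCALE,
 * where t is a signed sample from the user supplied table; sigma is split
 * into its quotient and remainder modulo NETEM_DIST_SCALE so precision is
 * kept without wider arithmetic.
 */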
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

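/* packet_len_2_sched_time - convert a packet length into the time it
 * occupies the link at the configured rate (in psched ticks), adding the
 * per-packet overhead and, when a cell size is set, rounding the length up
 * to whole cells plus per-cell overhead.
 */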
static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
{
	u64 ticks;

	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	ticks = (u64)len * NSEC_PER_SEC;

	do_div(ticks, q->rate);
	return PSCHED_NS2TICKS(ticks);
}

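/* tfifo_enqueue - insert skb into the time-ordered rbtree keyed by
 * time_to_send.  Ties go to the right so packets with equal send times
 * keep their arrival (FIFO) order.
 */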
static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = netem_rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(netem_rb_node(nskb), parent, p);
	rb_insert_color(netem_rb_node(nskb), &q->t_root);
	sch->q.qlen++;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int count = 1;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			sch->qstats.drops++; /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make copy if needed since we are modifying
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
		    (skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb_checksum_help(skb)))
			return qdisc_drop(skb, sch);

		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
		return qdisc_reshape_fail(skb, sch);

	sch->qstats.backlog += qdisc_pkt_len(skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();

		if (q->rate) {
			struct sk_buff *last;

			if (!skb_queue_empty(&sch->q))
				last = skb_peek_tail(&sch->q);
			else
				last = netem_rb_to_skb(rb_last(&q->t_root));
			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= netem_skb_cb(last)->time_to_send - now;
				delay = max_t(psched_tdiff_t, 0, delay);
				now = netem_skb_cb(last)->time_to_send;
			}

			delay += packet_len_2_sched_time(skb->len, q);
		}

		cb->time_to_send = now + delay;
		cb->tstamp_save = skb->tstamp;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

	return NET_XMIT_SUCCESS;
}

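/* netem_drop - drop one queued packet: first try the reorder queue
 * (sch->q), then the earliest entry in the tfifo rbtree, and finally ask
 * the child qdisc (if any) to drop one of its packets.
 */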
static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len;

	len = qdisc_queue_drop(sch);

	if (!len) {
		struct rb_node *p = rb_first(&q->t_root);

		if (p) {
			struct sk_buff *skb = netem_rb_to_skb(p);

			rb_erase(p, &q->t_root);
			sch->q.qlen--;
			skb->next = NULL;
			skb->prev = NULL;
			len = qdisc_pkt_len(skb);
			kfree_skb(skb);
		}
	}
	if (!len && q->qdisc && q->qdisc->ops->drop)
		len = q->qdisc->ops->drop(q->qdisc);
	if (len)
		sch->qstats.drops++;

	return len;
}

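/* netem_dequeue - deliver packets whose send time has arrived: the reorder
 * queue (sch->q) is served first, then the tfifo rbtree once time_to_send
 * has passed, optionally feeding packets through the child qdisc; otherwise
 * the watchdog is armed for the next send time.
 */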
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

	if (qdisc_is_throttled(sch))
		return NULL;

tfifo_dequeue:
	skb = __skb_dequeue(&sch->q);
	if (skb) {
deliver:
		sch->qstats.backlog -= qdisc_pkt_len(skb);
		qdisc_unthrottled(sch);
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		psched_time_t time_to_send;

		skb = netem_rb_to_skb(p);

		/* if more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (time_to_send <= psched_get_time()) {
			rb_erase(p, &q->t_root);

			sch->q.qlen--;
			skb->next = NULL;
			skb->prev = NULL;
			skb->tstamp = netem_skb_cb(skb)->tstamp_save;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif

			if (q->qdisc) {
				int err = qdisc_enqueue(skb, q->qdisc);

				if (unlikely(err != NET_XMIT_SUCCESS)) {
					if (net_xmit_drop_count(err)) {
						sch->qstats.drops++;
						qdisc_tree_decrease_qlen(sch, 1);
					}
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}
		qdisc_watchdog_schedule(&q->watchdog, time_to_send);
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	if (d) {
		if (is_vmalloc_addr(d))
			vfree(d);
		else
			kfree(d);
	}
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;
	size_t s;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	s = sizeof(struct disttable) + n * sizeof(s16);
	d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
	if (!d)
		d = vmalloc(s);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}

static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	q->cell_overhead = r->cell_overhead;
}

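/* get_loss_clg - parse the nested TCA_NETEM_LOSS attribute and select the
 * correlated loss generator: NETEM_LOSS_GI carries the 4-state (GI) model
 * parameters, NETEM_LOSS_GE the Gilbert-Elliot parameters.
 */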
static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = 1;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = 1;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
};

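/* netem's TCA_OPTIONS carries a struct tc_netem_qopt followed by optional
 * nested attributes, so parsing has to skip the (aligned) fixed header
 * before handing the remainder to nla_parse().
 */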
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	sch->limit = qopt->limit;

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(sch, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_REORDER])
		get_reorder(sch, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(sch, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	q->loss_model = CLG_RANDOM;
	if (tb[TCA_NETEM_LOSS])
		ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);

	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}

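/* dump_loss_model - emit the configured loss generator as a nested
 * TCA_NETEM_LOSS attribute; the legacy random model dumps nothing.
 */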
static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	rate.rate = q->rate;
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

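/* netem exposes a single pseudo-class (minor 1) so a child qdisc can be
 * grafted in for classful handling of the delayed packets.
 */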
static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	if (*old) {
		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
		qdisc_reset(*old);
	}
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");