/*
 * net/sched/sch_sfq.c	Stochastic Fairness Queueing discipline.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
11
12 #include <linux/module.h>
13 #include <linux/types.h>
14 #include <linux/kernel.h>
15 #include <linux/jiffies.h>
16 #include <linux/string.h>
17 #include <linux/in.h>
18 #include <linux/errno.h>
19 #include <linux/init.h>
20 #include <linux/ipv6.h>
21 #include <linux/skbuff.h>
22 #include <linux/jhash.h>
23 #include <linux/slab.h>
24 #include <linux/vmalloc.h>
25 #include <net/ip.h>
26 #include <net/netlink.h>
27 #include <net/pkt_sched.h>
28
29
/*	Stochastic Fairness Queuing algorithm.
	=======================================

	Source:
	Paul E. McKenney "Stochastic Fairness Queuing",
	IEEE INFOCOM'90 Proceedings, San Francisco, 1990.

	Paul E. McKenney "Stochastic Fairness Queuing",
	"Interworking: Research and Experience", v.2, 1991, p.113-131.


	See also:
	M. Shreedhar and George Varghese "Efficient Fair
	Queuing using Deficit Round Robin", Proc. SIGCOMM 95.


	This is not the thing that is usually called (W)FQ nowadays.
	It does not use any timestamp mechanism, but instead
	processes queues in round-robin order.

	ADVANTAGE:

	- It is very cheap. Both CPU and memory requirements are minimal.

	DRAWBACKS:

	- "Stochastic" -> It is not 100% fair.
	When hash collisions occur, several flows are considered as one.

	- "Round-robin" -> It introduces larger delays than virtual clock
	based schemes, and should not be used for isolating interactive
	traffic from non-interactive. It means that this scheduler
	should be used as a leaf of CBQ or P3, which puts interactive
	traffic into a higher priority band.

	We still need true WFQ for the top-level CSZ, but using WFQ
	for the best-effort traffic is absolutely pointless:
	SFQ is superior for this purpose.

	IMPLEMENTATION:
	This implementation limits the maximal queue length to 128 packets,
	the max mtu to 2^18-1, the max number of flows to 128, and the
	number of hash buckets to 1024.
	The only goal of these restrictions was that all data
	fit into one 4K page on 32-bit arches.

	It is easy to increase these values, but not in flight.  */
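
/*
 * Sketch of the algorithm described above (editor's summary, not part of
 * the original file):
 *
 *	on enqueue: bucket = hash(flow key, perturbation) % divisor;
 *	            append the skb to the slot bound to that bucket;
 *	on dequeue: visit active slots in round-robin order, letting each
 *	            slot send up to 'quantum' bytes per round before its
 *	            turn ends (deficit round robin).
 */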

#define SFQ_DEPTH		128 /* max number of packets per flow */
#define SFQ_SLOTS		128 /* max number of flows */
#define SFQ_EMPTY_SLOT		255
#define SFQ_DEFAULT_HASH_DIVISOR 1024

/* We use 16 bits to store allot, and want to handle packets up to 64K
 * Scale allot by 8 (1<<3) so that no overflow occurs.
 */
#define SFQ_ALLOT_SHIFT		3
#define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)

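/*
 * Worked example of the scaling (editor's illustration, not part of the
 * original code): with SFQ_ALLOT_SHIFT = 3, allot is counted in units of
 * 8 bytes, so
 *	SFQ_ALLOT_SIZE(1514)  == DIV_ROUND_UP(1514, 8)  == 190
 *	SFQ_ALLOT_SIZE(65535) == DIV_ROUND_UP(65535, 8) == 8192
 * Even a maximal 64K packet therefore costs at most 8192 units, which
 * fits comfortably in the 16-bit signed 'allot' field below.
 */
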
/* This type should contain at least SFQ_DEPTH + SFQ_SLOTS values */
typedef unsigned char sfq_index;

/*
 * We don't use pointers to save space.
 * Small indexes [0 ... SFQ_SLOTS - 1] are 'pointers' to slots[] array
 * while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1]
 * are 'pointers' to dep[] array
 */
struct sfq_head {
	sfq_index	next;
	sfq_index	prev;
};

struct sfq_slot {
	struct sk_buff	*skblist_next;
	struct sk_buff	*skblist_prev;
	sfq_index	qlen;		/* number of skbs in skblist */
	sfq_index	next;		/* next slot in sfq chain */
	struct sfq_head	dep;		/* anchor in dep[] chains */
	unsigned short	hash;		/* hash value (index in ht[]) */
	short		allot;		/* credit for this slot */
};

struct sfq_sched_data {
/* Parameters */
	int		perturb_period;
	unsigned int	quantum;	/* Allotment per round: MUST BE >= MTU */
	int		limit;
	unsigned int	divisor;	/* number of slots in hash table */
/* Variables */
	struct tcf_proto *filter_list;
	struct timer_list perturb_timer;
	u32		perturbation;
	sfq_index	cur_depth;	/* depth of longest slot */
	unsigned short	scaled_quantum;	/* SFQ_ALLOT_SIZE(quantum) */
	struct sfq_slot	*tail;		/* current slot in round */
	sfq_index	*ht;		/* Hash table (divisor slots) */
	struct sfq_slot	slots[SFQ_SLOTS];
	struct sfq_head	dep[SFQ_DEPTH];	/* Linked list of slots, indexed by depth */
};
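
/*
 * Back-of-the-envelope check of the "one 4K page" claim above (editor's
 * estimate; exact sizes depend on arch and padding): on a 32-bit arch a
 * struct sfq_slot is roughly 16 bytes, so slots[] is about 128 * 16 = 2048
 * bytes, dep[] is 128 * 2 = 256 bytes, and the remaining fields add a few
 * dozen more, keeping sizeof(struct sfq_sched_data) under 4096 bytes.
 * The hash table (ht) is allocated separately in sfq_init().
 */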

/*
 * sfq_head are either in a sfq_slot or in dep[] array
 */
static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
{
	if (val < SFQ_SLOTS)
		return &q->slots[val].dep;
	return &q->dep[val - SFQ_SLOTS];
}
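
/*
 * Example of the index encoding (editor's illustration): val == 5 refers
 * to q->slots[5].dep, while val == 130 refers to q->dep[130 - 128], i.e.
 * the chain anchor for slots currently holding exactly 2 packets. This is
 * how a single byte can 'point' into either array.
 */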

static unsigned int sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
{
	return jhash_2words(h, h1, q->perturbation) & (q->divisor - 1);
}
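
/*
 * Note (editor's): the '& (q->divisor - 1)' fold only selects a uniform
 * bucket when divisor is a power of two, e.g. divisor 1024 gives the
 * mask 0x3ff. sfq_change() below enforces this with is_power_of_2().
 */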

static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
{
	u32 h, h2;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
	{
		const struct iphdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			goto err;
		iph = ip_hdr(skb);
		h = (__force u32)iph->daddr;
		h2 = (__force u32)iph->saddr ^ iph->protocol;
		if (ip_is_fragment(iph))
			break;
		poff = proto_ports_offset(iph->protocol);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
			iph = ip_hdr(skb);
			h2 ^= *(u32 *)((void *)iph + iph->ihl * 4 + poff);
		}
		break;
	}
	case htons(ETH_P_IPV6):
	{
		const struct ipv6hdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			goto err;
		iph = ipv6_hdr(skb);
		h = (__force u32)iph->daddr.s6_addr32[3];
		h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr;
		poff = proto_ports_offset(iph->nexthdr);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
			iph = ipv6_hdr(skb);
			h2 ^= *(u32 *)((void *)iph + sizeof(*iph) + poff);
		}
		break;
	}
	default:
err:
		h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol;
		h2 = (unsigned long)skb->sk;
	}

	return sfq_fold_hash(q, h, h2);
}
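
/*
 * Flow key example (editor's illustration): for TCP/IPv4 the bucket is
 * derived from (daddr, saddr ^ protocol ^ ports), so two connections from
 * the same host to the same server on different source ports normally
 * land in different buckets, while fragments (which carry no port
 * numbers) of one datagram all fold to the same bucket.
 */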

static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				 int *qerr)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->divisor)
		return TC_H_MIN(skb->priority);

	if (!q->filter_list)
		return sfq_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through: stolen/queued packets also return 0 */
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->divisor)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/*
 * x : slot number [0 .. SFQ_SLOTS - 1]
 */
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int qlen = q->slots[x].qlen;

	p = qlen + SFQ_SLOTS;
	n = q->dep[qlen].next;

	q->slots[x].dep.next = n;
	q->slots[x].dep.prev = p;

	q->dep[qlen].next = x;		/* sfq_dep_head(q, p)->next = x */
	sfq_dep_head(q, n)->prev = x;
}
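
/*
 * Worked example (editor's illustration): right after sfq_init() every
 * slot has qlen == 0, so all 128 slots sit on the dep[0] chain. When a
 * packet is queued to slot 7, sfq_inc() relinks slot 7 from dep[0] to
 * dep[1]; cur_depth tracks the largest such depth, so sfq_drop() can find
 * the longest flow in O(1) via q->dep[cur_depth].next.
 */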

#define sfq_unlink(q, x, n, p)			\
	n = q->slots[x].dep.next;		\
	p = q->slots[x].dep.prev;		\
	sfq_dep_head(q, p)->next = n;		\
	sfq_dep_head(q, n)->prev = p


static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = q->slots[x].qlen--;
	if (n == p && q->cur_depth == d)
		q->cur_depth--;
	sfq_link(q, x);
}

static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = ++q->slots[x].qlen;
	if (q->cur_depth < d)
		q->cur_depth = d;
	sfq_link(q, x);
}

/* helper functions : might be changed when/if skb use a standard list_head */

/* remove one skb from tail of slot queue */
static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_prev;

	slot->skblist_prev = skb->prev;
	skb->prev->next = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

/* remove one skb from head of slot queue */
static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_next;

	slot->skblist_next = skb->next;
	skb->next->prev = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

static inline void slot_queue_init(struct sfq_slot *slot)
{
	slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
}

/* add skb to slot queue (tail add) */
static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
{
	skb->prev = slot->skblist_prev;
	skb->next = (struct sk_buff *)slot;
	slot->skblist_prev->next = skb;
	slot->skblist_prev = skb;
}

#define slot_queue_walk(slot, skb)		\
	for (skb = slot->skblist_next;		\
	     skb != (struct sk_buff *)slot;	\
	     skb = skb->next)
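
/*
 * Design note (editor's): the slot plays the role of a circular list
 * head. Because skblist_next/skblist_prev are the first two fields of
 * struct sfq_slot, casting the slot to (struct sk_buff *) lets the
 * next/prev links of the queued packets point back at the slot itself,
 * so an empty queue is simply
 *	slot->skblist_next == (struct sk_buff *)slot
 * and no sentinel skb has to be allocated per slot.
 */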

static unsigned int sfq_drop(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index x, d = q->cur_depth;
	struct sk_buff *skb;
	unsigned int len;
	struct sfq_slot *slot;

	/* Queue is full! Find the longest slot and drop tail packet from it */
	if (d > 1) {
		x = q->dep[d].next;
		slot = &q->slots[x];
drop:
		skb = slot_dequeue_tail(slot);
		len = qdisc_pkt_len(skb);
		sfq_dec(q, x);
		kfree_skb(skb);
		sch->q.qlen--;
		sch->qstats.drops++;
		sch->qstats.backlog -= len;
		return len;
	}

	if (d == 1) {
		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
		x = q->tail->next;
		slot = &q->slots[x];
		q->tail->next = slot->next;
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		goto drop;
	}

	return 0;
}

static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash;
	sfq_index x, qlen;
	struct sfq_slot *slot;
	int uninitialized_var(ret);

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
	}

	/* If selected queue has length q->limit, do simple tail drop,
	 * i.e. drop _this_ packet.
	 */
	if (slot->qlen >= q->limit)
		return qdisc_drop(skb, sch);

	sch->qstats.backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		q->tail = slot;
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit)
		return NET_XMIT_SUCCESS;

	qlen = slot->qlen;
	sfq_drop(sch);
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	return (qlen != slot->qlen) ? NET_XMIT_CN : NET_XMIT_SUCCESS;
}
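
/*
 * Typical usage from user space (editor's illustration; standard
 * iproute2 syntax, not defined in this file):
 *
 *	tc qdisc add dev eth0 root sfq perturb 10
 *
 * installs SFQ as the root qdisc of eth0 and rehashes flows every 10
 * seconds via the perturbation timer below.
 */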

static struct sk_buff *
sfq_dequeue(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	sfq_index a, next_a;
	struct sfq_slot *slot;

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

next_slot:
	a = q->tail->next;
	slot = &q->slots[a];
	if (slot->allot <= 0) {
		q->tail = slot;
		slot->allot += q->scaled_quantum;
		goto next_slot;
	}
	skb = slot_dequeue_head(slot);
	sfq_dec(q, a);
	qdisc_bstats_update(sch, skb);
	sch->q.qlen--;
	sch->qstats.backlog -= qdisc_pkt_len(skb);

	/* Is the slot empty? */
	if (slot->qlen == 0) {
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		next_a = slot->next;
		if (a == next_a) {
			q->tail = NULL; /* no more active slots */
			return skb;
		}
		q->tail->next = next_a;
	} else {
		slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
	}
	return skb;
}
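
/*
 * Worked example of the deficit accounting above (editor's illustration):
 * with quantum 1514, scaled_quantum is SFQ_ALLOT_SIZE(1514) == 190.
 * Dequeuing a 64KB GSO packet costs SFQ_ALLOT_SIZE(65536) == 8192 units,
 * driving the slot's allot down to about -8002; it then takes roughly
 * 8002 / 190 ~= 43 skipped rounds (each adding scaled_quantum back)
 * before that slot may transmit again. This is what keeps a flow of
 * large packets from starving flows of small ones.
 */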

static void
sfq_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = sfq_dequeue(sch)) != NULL)
		kfree_skb(skb);
}

static void sfq_perturbation(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct sfq_sched_data *q = qdisc_priv(sch);

	q->perturbation = net_random();

	if (q->perturb_period)
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}

static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	unsigned int qlen;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;

	if (ctl->divisor &&
	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
		return -EINVAL;

	sch_tree_lock(sch);
	q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->limit)
		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
	if (ctl->divisor)
		q->divisor = ctl->divisor;
	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit)
		sfq_drop(sch);
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = net_random();
	}
	sch_tree_unlock(sch);
	return 0;
}

static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	size_t sz;
	int i;

	q->perturb_timer.function = sfq_perturbation;
	q->perturb_timer.data = (unsigned long)sch;
	init_timer_deferrable(&q->perturb_timer);

	for (i = 0; i < SFQ_DEPTH; i++) {
		q->dep[i].next = i + SFQ_SLOTS;
		q->dep[i].prev = i + SFQ_SLOTS;
	}

	q->limit = SFQ_DEPTH - 1;
	q->cur_depth = 0;
	q->tail = NULL;
	q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
	if (opt == NULL) {
		q->quantum = psched_mtu(qdisc_dev(sch));
		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
		q->perturb_period = 0;
		q->perturbation = net_random();
	} else {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	sz = sizeof(q->ht[0]) * q->divisor;
	q->ht = kmalloc(sz, GFP_KERNEL);
	if (!q->ht && sz > PAGE_SIZE)
		q->ht = vmalloc(sz);
	if (!q->ht)
		return -ENOMEM;
	for (i = 0; i < q->divisor; i++)
		q->ht[i] = SFQ_EMPTY_SLOT;

	for (i = 0; i < SFQ_SLOTS; i++) {
		slot_queue_init(&q->slots[i]);
		sfq_link(q, i);
	}
	if (q->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void sfq_destroy(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	q->perturb_period = 0;
	del_timer_sync(&q->perturb_timer);
	if (is_vmalloc_addr(q->ht))
		vfree(q->ht);
	else
		kfree(q->ht);
}

static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_sfq_qopt opt;

	opt.quantum = q->quantum;
	opt.perturb_period = q->perturb_period / HZ;

	opt.limit = q->limit;
	opt.divisor = q->divisor;
	opt.flows = q->limit;

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void sfq_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				struct gnet_dump *d)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index idx = q->ht[cl - 1];
	struct gnet_stats_queue qs = { 0 };
	struct tc_sfq_xstats xstats = { 0 };
	struct sk_buff *skb;

	if (idx != SFQ_EMPTY_SLOT) {
		const struct sfq_slot *slot = &q->slots[idx];

		xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
		qs.qlen = slot->qlen;
		slot_queue_walk(slot, skb)
			qs.backlog += qdisc_pkt_len(skb);
	}
	if (gnet_stats_copy_queue(d, &qs) < 0)
		return -1;
	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->divisor; i++) {
		if (q->ht[i] == SFQ_EMPTY_SLOT ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops sfq_class_ops = {
	.leaf		=	sfq_leaf,
	.get		=	sfq_get,
	.put		=	sfq_put,
	.tcf_chain	=	sfq_find_tcf,
	.bind_tcf	=	sfq_bind,
	.unbind_tcf	=	sfq_put,
	.dump		=	sfq_dump_class,
	.dump_stats	=	sfq_dump_class_stats,
	.walk		=	sfq_walk,
};

static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
	.cl_ops		=	&sfq_class_ops,
	.id		=	"sfq",
	.priv_size	=	sizeof(struct sfq_sched_data),
	.enqueue	=	sfq_enqueue,
	.dequeue	=	sfq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	sfq_drop,
	.init		=	sfq_init,
	.reset		=	sfq_reset,
	.destroy	=	sfq_destroy,
	.change		=	NULL,
	.dump		=	sfq_dump,
	.owner		=	THIS_MODULE,
};

static int __init sfq_module_init(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}
static void __exit sfq_module_exit(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}
module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");