Merge branch '20101221_static_const' of git://repo.or.cz/linux-2.6/trivial-mods
[deliverable/linux.git] / net / sched / sch_sfq.c
CommitLineData
1da177e4
LT
1/*
2 * net/sched/sch_sfq.c Stochastic Fairness Queueing discipline.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 */
11
1da177e4 12#include <linux/module.h>
1da177e4
LT
13#include <linux/types.h>
14#include <linux/kernel.h>
15#include <linux/jiffies.h>
16#include <linux/string.h>
1da177e4
LT
17#include <linux/in.h>
18#include <linux/errno.h>
1da177e4 19#include <linux/init.h>
1da177e4 20#include <linux/ipv6.h>
1da177e4 21#include <linux/skbuff.h>
32740ddc 22#include <linux/jhash.h>
5a0e3ad6 23#include <linux/slab.h>
0ba48053
PM
24#include <net/ip.h>
25#include <net/netlink.h>
1da177e4
LT
26#include <net/pkt_sched.h>
27
28
29/* Stochastic Fairness Queuing algorithm.
30 =======================================
31
32 Source:
33 Paul E. McKenney "Stochastic Fairness Queuing",
34 IEEE INFOCOMM'90 Proceedings, San Francisco, 1990.
35
36 Paul E. McKenney "Stochastic Fairness Queuing",
37 "Interworking: Research and Experience", v.2, 1991, p.113-131.
38
39
40 See also:
41 M. Shreedhar and George Varghese "Efficient Fair
42 Queuing using Deficit Round Robin", Proc. SIGCOMM 95.
43
44
10297b99 45 This is not the thing that is usually called (W)FQ nowadays.
1da177e4
LT
46 It does not use any timestamp mechanism, but instead
47 processes queues in round-robin order.
48
49 ADVANTAGE:
50
51 - It is very cheap. Both CPU and memory requirements are minimal.
52
53 DRAWBACKS:
54
10297b99 55 - "Stochastic" -> It is not 100% fair.
1da177e4
LT
56 When hash collisions occur, several flows are considered as one.
57
58 - "Round-robin" -> It introduces larger delays than virtual clock
59 based schemes, and should not be used for isolating interactive
60 traffic from non-interactive. It means, that this scheduler
61 should be used as leaf of CBQ or P3, which put interactive traffic
62 to higher priority band.
63
64 We still need true WFQ for top level CSZ, but using WFQ
65 for the best effort traffic is absolutely pointless:
66 SFQ is superior for this purpose.
67
68 IMPLEMENTATION:
69 This implementation limits maximal queue length to 128;
eda83e3b 70 maximal mtu to 2^15-1; max 128 flows, number of hash buckets to 1024.
1da177e4 71	The only goal of these restrictions was that all data
eda83e3b 72 fit into one 4K page on 32bit arches.
1da177e4
LT
73
74 It is easy to increase these values, but not in flight. */
75
/* Hard limits: chosen so all per-qdisc data fits in one 4K page on
 * 32bit arches (see the IMPLEMENTATION note above).
 */
#define SFQ_DEPTH		128 /* max number of packets per flow */
#define SFQ_SLOTS		128 /* max number of flows */
#define SFQ_EMPTY_SLOT		255 /* ht[] value meaning "no slot allocated" */
#define SFQ_HASH_DIVISOR	1024 /* number of hash buckets (power of 2, see sfq_fold_hash()) */
80
/* This type should contain at least SFQ_DEPTH + SFQ_SLOTS values */
typedef unsigned char sfq_index;

/*
 * We dont use pointers to save space.
 * Small indexes [0 ... SFQ_SLOTS - 1] are 'pointers' to slots[] array
 * while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1]
 * are 'pointers' to dep[] array
 */
struct sfq_head
{
	sfq_index	next;
	sfq_index	prev;
};

/* One active flow: an skb list plus bookkeeping for the round-robin
 * ring (next, allot) and the depth chains (dep).
 */
struct sfq_slot {
	struct sk_buff	*skblist_next;	/* head of this flow's skb list */
	struct sk_buff	*skblist_prev;	/* tail of this flow's skb list */
	sfq_index	qlen; /* number of skbs in skblist */
	sfq_index	next; /* next slot in sfq chain */
	struct sfq_head	dep; /* anchor in dep[] chains */
	unsigned short	hash; /* hash value (index in ht[]) */
	short		allot; /* credit for this slot */
};
105
struct sfq_sched_data
{
/* Parameters */
	int		perturb_period;	/* jiffies between hash reseeds; 0 = never */
	unsigned	quantum;	/* Allotment per round: MUST BE >= MTU */
	int		limit;		/* max packets queued (capped below SFQ_DEPTH) */

/* Variables */
	struct tcf_proto *filter_list;	/* optional tc classifier chain */
	struct timer_list perturb_timer;	/* fires sfq_perturbation() */
	u32		perturbation;	/* jhash seed, refreshed by the timer */
	sfq_index	cur_depth;	/* depth of longest slot */

	struct sfq_slot	*tail;		/* current slot in round */
	sfq_index	ht[SFQ_HASH_DIVISOR];	/* Hash table */
	struct sfq_slot	slots[SFQ_SLOTS];
	struct sfq_head	dep[SFQ_DEPTH];	/* Linked list of slots, indexed by depth */
};
124
eda83e3b
ED
125/*
126 * sfq_head are either in a sfq_slot or in dep[] array
127 */
128static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
129{
130 if (val < SFQ_SLOTS)
131 return &q->slots[val].dep;
132 return &q->dep[val - SFQ_SLOTS];
133}
134
1da177e4
LT
135static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
136{
32740ddc 137 return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1);
1da177e4
LT
138}
139
/*
 * Default flow classifier: hash the packet onto one of the
 * SFQ_HASH_DIVISOR buckets.  For IPv4/IPv6 it folds the addresses, the
 * L4 protocol and, when safely reachable, the first 4 bytes of the
 * transport header (the ports); anything else falls back to mixing the
 * route, protocol and originating socket.
 */
static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
{
	u32 h, h2;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
	{
		const struct iphdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			goto err;
		iph = ip_hdr(skb);
		h = (__force u32)iph->daddr;
		h2 = (__force u32)iph->saddr ^ iph->protocol;
		/* non-first fragments carry no transport header: stop here */
		if (iph->frag_off & htons(IP_MF|IP_OFFSET))
			break;
		poff = proto_ports_offset(iph->protocol);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
			/* pskb_network_may_pull() may reallocate the head:
			 * reload iph before dereferencing
			 */
			iph = ip_hdr(skb);
			h2 ^= *(u32*)((void *)iph + iph->ihl * 4 + poff);
		}
		break;
	}
	case htons(ETH_P_IPV6):
	{
		struct ipv6hdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			goto err;
		iph = ipv6_hdr(skb);
		/* only the low 32 bits of each address are hashed */
		h = (__force u32)iph->daddr.s6_addr32[3];
		h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr;
		poff = proto_ports_offset(iph->nexthdr);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
			iph = ipv6_hdr(skb);
			h2 ^= *(u32*)((void *)iph + sizeof(*iph) + poff);
		}
		break;
	}
	default:
err:
		/* fallback: per-route / per-socket hashing */
		h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol;
		h2 = (unsigned long)skb->sk;
	}

	return sfq_fold_hash(q, h, h2);
}
191
7d2681a6
PM
/*
 * Map skb to a flow number in [1 .. SFQ_HASH_DIVISOR]; 0 means "drop".
 * skb->priority or an attached tc filter chain may override the
 * default hash.  On a 0 return, *qerr tells the caller how to account
 * the drop.
 */
static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				 int *qerr)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	/* direct flow selection via skb->priority minor number */
	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= SFQ_HASH_DIVISOR)
		return TC_H_MIN(skb->priority);

	if (!q->filter_list)
		return sfq_hash(q, skb) + 1;	/* +1: 0 is reserved for "drop" */

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= SFQ_HASH_DIVISOR)
			return TC_H_MIN(res.classid);
	}
	return 0;
}
224
eda83e3b
ED
225/*
226 * x : slot number [0 .. SFQ_SLOTS - 1]
227 */
1da177e4
LT
228static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
229{
230 sfq_index p, n;
eda83e3b
ED
231 int qlen = q->slots[x].qlen;
232
233 p = qlen + SFQ_SLOTS;
234 n = q->dep[qlen].next;
1da177e4 235
eda83e3b
ED
236 q->slots[x].dep.next = n;
237 q->slots[x].dep.prev = p;
238
239 q->dep[qlen].next = x; /* sfq_dep_head(q, p)->next = x */
240 sfq_dep_head(q, n)->prev = x;
1da177e4
LT
241}
242
eda83e3b
ED
/*
 * Remove slot x from its dep[] chain; n and p receive the former
 * neighbours (callers pass them on to depth bookkeeping).
 *
 * Wrapped in do { } while (0) so the macro behaves as a single
 * statement (safe inside an unbraced if/else), per kernel coding style
 * for multi-statement macros.
 */
#define sfq_unlink(q, x, n, p)			\
	do {					\
		n = q->slots[x].dep.next;	\
		p = q->slots[x].dep.prev;	\
		sfq_dep_head(q, p)->next = n;	\
		sfq_dep_head(q, n)->prev = p;	\
	} while (0)
248
249
1da177e4
LT
/*
 * Account for one packet removed from slot x: relink the slot onto the
 * dep[] chain matching its new (smaller) queue length and update the
 * maximum depth if needed.
 */
static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = q->slots[x].qlen--;	/* post-decrement: d is the depth _before_ removal */
	/* n == p means x was the only slot at depth d; if d was also the
	 * current maximum, the maximum just shrank by one
	 */
	if (n == p && q->cur_depth == d)
		q->cur_depth--;
	sfq_link(q, x);
}
262
/*
 * Account for one packet added to slot x: relink the slot onto the
 * dep[] chain matching its new (larger) queue length and raise the
 * maximum depth if this slot is now the longest.
 */
static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = ++q->slots[x].qlen;	/* pre-increment: d is the depth _after_ insertion */
	if (q->cur_depth < d)
		q->cur_depth = d;
	sfq_link(q, x);
}
275
eda83e3b
ED
276/* helper functions : might be changed when/if skb use a standard list_head */
277
278/* remove one skb from tail of slot queue */
279static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
280{
281 struct sk_buff *skb = slot->skblist_prev;
282
283 slot->skblist_prev = skb->prev;
284 skb->next = skb->prev = NULL;
285 return skb;
286}
287
288/* remove one skb from head of slot queue */
289static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
290{
291 struct sk_buff *skb = slot->skblist_next;
292
293 slot->skblist_next = skb->next;
294 skb->next = skb->prev = NULL;
295 return skb;
296}
297
298static inline void slot_queue_init(struct sfq_slot *slot)
299{
300 slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
301}
302
303/* add skb to slot queue (tail add) */
304static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
305{
306 skb->prev = slot->skblist_prev;
307 skb->next = (struct sk_buff *)slot;
308 slot->skblist_prev->next = skb;
309 slot->skblist_prev = skb;
310}
311
/* iterate over every skb queued in a slot; skb must not be unlinked
 * inside the loop (the next pointer is read after the body runs)
 */
#define slot_queue_walk(slot, skb)		\
	for (skb = slot->skblist_next;		\
	     skb != (struct sk_buff *)slot;	\
	     skb = skb->next)
316
1da177e4
LT
/*
 * Drop one packet to make room: prefer the tail packet of the longest
 * slot; when every active slot holds exactly one packet, drop from the
 * next slot in the round and retire that slot entirely.
 * Returns the dropped packet's length, or 0 if nothing was queued.
 */
static unsigned int sfq_drop(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index x, d = q->cur_depth;
	struct sk_buff *skb;
	unsigned int len;
	struct sfq_slot *slot;

	/* Queue is full! Find the longest slot and drop tail packet from it */
	if (d > 1) {
		x = q->dep[d].next;	/* first slot at maximal depth */
		slot = &q->slots[x];
drop:
		skb = slot_dequeue_tail(slot);
		len = qdisc_pkt_len(skb);
		sfq_dec(q, x);
		kfree_skb(skb);
		sch->q.qlen--;
		sch->qstats.drops++;
		sch->qstats.backlog -= len;
		return len;
	}

	if (d == 1) {
		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
		/* unlink the slot from the round-robin ring and free its
		 * hash bucket; it will be empty after the shared drop path
		 */
		x = q->tail->next;
		slot = &q->slots[x];
		q->tail->next = slot->next;
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		goto drop;
	}

	return 0;
}
351
/*
 * Classify skb into a slot and append it, allocating a free slot and
 * inserting the flow into the round-robin ring when the flow is new.
 * Tail-drops when the target slot is at the limit; when the whole
 * qdisc exceeds the limit, evicts via sfq_drop() and returns
 * NET_XMIT_CN.
 */
static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash;
	sfq_index x;
	struct sfq_slot *slot;
	int uninitialized_var(ret);

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		/* only count the drop when the packet was not stolen/queued
		 * by a classifier action
		 */
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	hash--;	/* sfq_classify() returns 1-based flow numbers */

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
		slot_queue_init(slot);
	}

	/* If selected queue has length q->limit, do simple tail drop,
	 * i.e. drop _this_ packet.
	 */
	if (slot->qlen >= q->limit)
		return qdisc_drop(skb, sch);

	sch->qstats.backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;	/* ring of one */
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		q->tail = slot;
		slot->allot = q->quantum;	/* fresh credit for the new flow */
	}
	if (++sch->q.qlen <= q->limit) {
		sch->bstats.bytes += qdisc_pkt_len(skb);
		sch->bstats.packets++;
		return NET_XMIT_SUCCESS;
	}

	/* over the global limit: drop from the longest slot instead */
	sfq_drop(sch);
	return NET_XMIT_CN;
}
408
48a8f519
PM
409static struct sk_buff *
410sfq_peek(struct Qdisc *sch)
411{
412 struct sfq_sched_data *q = qdisc_priv(sch);
1da177e4 413
48a8f519 414 /* No active slots */
eda83e3b 415 if (q->tail == NULL)
48a8f519 416 return NULL;
1da177e4 417
eda83e3b 418 return q->slots[q->tail->next].skblist_next;
48a8f519 419}
1da177e4
LT
420
/*
 * Dequeue the head packet of the current round-robin slot.  A slot
 * that becomes empty is retired from the ring; a slot that used up its
 * allot moves to the end of the round and is refilled with one
 * quantum.
 */
static struct sk_buff *
sfq_dequeue(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	sfq_index a, next_a;
	struct sfq_slot *slot;

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

	a = q->tail->next;	/* slot whose turn it is */
	slot = &q->slots[a];
	skb = slot_dequeue_head(slot);
	sfq_dec(q, a);
	sch->q.qlen--;
	sch->qstats.backlog -= qdisc_pkt_len(skb);

	/* Is the slot empty? */
	if (slot->qlen == 0) {
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;	/* free the hash bucket */
		next_a = slot->next;
		if (a == next_a) {
			q->tail = NULL; /* no more active slots */
			return skb;
		}
		q->tail->next = next_a;	/* unlink slot from the ring */
	} else if ((slot->allot -= qdisc_pkt_len(skb)) <= 0) {
		/* credit exhausted: slot moves to the end of the round */
		q->tail = slot;
		slot->allot += q->quantum;
	}
	return skb;
}
455
456static void
6f9e98f7 457sfq_reset(struct Qdisc *sch)
1da177e4
LT
458{
459 struct sk_buff *skb;
460
461 while ((skb = sfq_dequeue(sch)) != NULL)
462 kfree_skb(skb);
463}
464
465static void sfq_perturbation(unsigned long arg)
466{
6f9e98f7 467 struct Qdisc *sch = (struct Qdisc *)arg;
1da177e4
LT
468 struct sfq_sched_data *q = qdisc_priv(sch);
469
d46f8dd8 470 q->perturbation = net_random();
1da177e4 471
32740ddc
AK
472 if (q->perturb_period)
473 mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
1da177e4
LT
474}
475
/*
 * Apply a TCA_OPTIONS blob (struct tc_sfq_qopt): quantum, perturbation
 * period and queue limit.  Excess packets are dropped under the tree
 * lock and parent qdiscs are told about the qlen change.
 */
static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	unsigned int qlen;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;

	sch_tree_lock(sch);
	q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));	/* 0 means "use device MTU" */
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->limit)
		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);	/* a slot holds < SFQ_DEPTH skbs */

	/* shrink the queue to the new limit and report the delta upward */
	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit)
		sfq_drop(sch);
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = net_random();
	}
	sch_tree_unlock(sch);
	return 0;
}
504
/*
 * Initialize the qdisc to an empty state, then apply the optional
 * netlink configuration via sfq_change().
 */
static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	int i;

	q->perturb_timer.function = sfq_perturbation;
	q->perturb_timer.data = (unsigned long)sch;
	init_timer_deferrable(&q->perturb_timer);	/* reseeding need not be precise */

	for (i = 0; i < SFQ_HASH_DIVISOR; i++)
		q->ht[i] = SFQ_EMPTY_SLOT;

	/* every dep[] chain starts out empty: head links to itself */
	for (i = 0; i < SFQ_DEPTH; i++) {
		q->dep[i].next = i + SFQ_SLOTS;
		q->dep[i].prev = i + SFQ_SLOTS;
	}

	q->limit = SFQ_DEPTH - 1;
	q->cur_depth = 0;
	q->tail = NULL;
	if (opt == NULL) {
		q->quantum = psched_mtu(qdisc_dev(sch));
		q->perturb_period = 0;
		q->perturbation = net_random();
	} else {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	/* all slots have qlen 0: this links them all into dep[0],
	 * which therefore doubles as the free-slot list
	 */
	for (i = 0; i < SFQ_SLOTS; i++)
		sfq_link(q, i);
	return 0;
}
539
/* Tear down the qdisc: release the filter chain and stop the timer. */
static void sfq_destroy(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	/* clear the period first so a concurrently running
	 * sfq_perturbation() cannot re-arm the timer we are killing
	 */
	q->perturb_period = 0;
	del_timer_sync(&q->perturb_timer);
}
548
/*
 * Dump the configuration as a TCA_OPTIONS / struct tc_sfq_qopt
 * attribute.  Returns the skb length on success, -1 on overflow.
 */
static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);	/* rollback point on failure */
	struct tc_sfq_qopt opt;

	opt.quantum = q->quantum;
	opt.perturb_period = q->perturb_period / HZ;	/* exported in seconds */

	opt.limit = q->limit;
	opt.divisor = SFQ_HASH_DIVISOR;
	opt.flows = q->limit;

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);	/* jumps to nla_put_failure on overflow */

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
570
41065fba
JP
/* SFQ "classes" are internal flows with no attached inner qdisc */
static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}
575
7d2681a6
PM
/* class lookup stub: flows are not individually refcounted */
static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}
580
eb4a5527
JP
/* filter bind stub: no per-class state to attach to */
static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}
586
da7115d9
JP
/* release stub (also used as .unbind_tcf): nothing to free */
static void sfq_put(struct Qdisc *q, unsigned long cl)
{
}
590
7d2681a6
PM
591static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
592{
593 struct sfq_sched_data *q = qdisc_priv(sch);
594
595 if (cl)
596 return NULL;
597 return &q->filter_list;
598}
599
94de78d1
PM
/* report a flow's handle: minor number = 1-based flow id (cl) */
static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}
606
/*
 * Per-flow statistics: queue length, backlog (sum of queued packet
 * sizes) and the slot's remaining allot.
 */
static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				struct gnet_dump *d)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	/* cl is the 1-based flow number (see sfq_classify()) */
	const struct sfq_slot *slot = &q->slots[q->ht[cl - 1]];
	struct gnet_stats_queue qs = { .qlen = slot->qlen };
	struct tc_sfq_xstats xstats = { .allot = slot->allot };
	struct sk_buff *skb;

	slot_queue_walk(slot, skb)
		qs.backlog += qdisc_pkt_len(skb);

	if (gnet_stats_copy_queue(d, &qs) < 0)
		return -1;
	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}
623
7d2681a6
PM
/*
 * Walk all active flows (hash buckets with an allocated slot),
 * invoking arg->fn with the 1-based flow number for each.
 */
static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < SFQ_HASH_DIVISOR; i++) {
		/* skip empty buckets and honour the walker's skip count */
		if (q->ht[i] == SFQ_EMPTY_SLOT ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
645
/* class operations: mostly stubs, since flows are internal */
static const struct Qdisc_class_ops sfq_class_ops = {
	.leaf		=	sfq_leaf,
	.get		=	sfq_get,
	.put		=	sfq_put,
	.tcf_chain	=	sfq_find_tcf,
	.bind_tcf	=	sfq_bind,
	.unbind_tcf	=	sfq_put,	/* unbind needs no work either */
	.dump		=	sfq_dump_class,
	.dump_stats	=	sfq_dump_class_stats,
	.walk		=	sfq_walk,
};
657
/* qdisc registration table for "sfq" */
static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
	.cl_ops		=	&sfq_class_ops,
	.id		=	"sfq",
	.priv_size	=	sizeof(struct sfq_sched_data),
	.enqueue	=	sfq_enqueue,
	.dequeue	=	sfq_dequeue,
	.peek		=	sfq_peek,
	.drop		=	sfq_drop,
	.init		=	sfq_init,
	.reset		=	sfq_reset,
	.destroy	=	sfq_destroy,
	.change		=	NULL,	/* runtime reconfiguration not wired up here */
	.dump		=	sfq_dump,
	.owner		=	THIS_MODULE,
};
673
/* module entry/exit: (un)register the qdisc with the tc core */
static int __init sfq_module_init(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}
static void __exit sfq_module_exit(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}
module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");
This page took 0.578071 seconds and 5 git commands to generate.