/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:  ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>

/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen > qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than RAM size).
	Really, this limit will never be reached
	if RED works correctly.
 */

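/*	Illustrative userspace configuration (not part of this file; the
	exact numbers are just an example, loosely following tc-red(8)):

		tc qdisc add dev eth0 root red limit 400000 \
			min 30000 max 90000 avpkt 1000 burst 55 \
			ecn bandwidth 10mbit probability 0.02

	Note how limit (400 KB) is chosen well above max (qth_max, 90 KB),
	so bursts are absorbed by the queue instead of being tail-dropped
	at the hard limit.
 */
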
struct red_sched_data
{
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;		/* TC_RED_ECN, TC_RED_HARDDROP */
	struct red_parms	parms;		/* RED algorithm state */
	struct red_stats	stats;		/* mark/drop counters */
	struct Qdisc		*qdisc;		/* child queue (bfifo by default) */
};

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

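/*
 * Enqueue: feed the child queue's backlog into the moving average of
 * the queue length (roughly qavg += W * (backlog - qavg), W = 2^-Wlog)
 * and let red_action() choose between passing the packet through,
 * probabilistically marking/dropping it between qth_min and qth_max,
 * and forcibly marking/dropping it above qth_max.  With ECN enabled,
 * CE-marking replaces dropping, except in the forced region when the
 * harddrop flag is set.
 */
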
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->parms.qavg = red_calc_qavg(&q->parms, child->qstats.backlog);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	switch (red_action(&q->parms, q->parms.qavg)) {
		case RED_DONT_MARK:
			break;

		case RED_PROB_MARK:
			sch->qstats.overlimits++;
			if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
				q->stats.prob_drop++;
				goto congestion_drop;
			}

			q->stats.prob_mark++;
			break;

		case RED_HARD_MARK:
			sch->qstats.overlimits++;
			if (red_use_harddrop(q) || !red_use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
			break;
	}

	ret = child->enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
		sch->q.qlen++;
	} else {
		q->stats.pdrop++;
		sch->qstats.drops++;
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}

static int red_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	ret = child->ops->requeue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->qstats.requeues++;
		sch->q.qlen++;
	}
	return ret;
}

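/*
 * Dequeue: when the child queue runs empty, start an "idle period" so
 * that red_calc_qavg() can later decay qavg for the time the link sat
 * idle, instead of freezing the average at its last busy value.
 */
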
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb)
		sch->q.qlen--;
	else if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return skb;
}

static unsigned int red_drop(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	unsigned int len;

	if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
		q->stats.other++;
		sch->qstats.drops++;
		sch->q.qlen--;
		return len;
	}

	if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return 0;
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	red_restart(&q->parms);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_destroy(q->qdisc);
}

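/*
 * Build the default child qdisc: a byte-limited FIFO (bfifo) capped at
 * the RED limit.  The fifo's own change() hook expects a struct
 * tc_fifo_qopt wrapped in an rtattr, so one is assembled by hand here.
 */
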
static struct Qdisc *red_create_dflt(struct net_device *dev, u32 limit)
{
	struct Qdisc *q = qdisc_create_dflt(dev, &bfifo_qdisc_ops);
	struct rtattr *rta;
	int ret;

	if (q) {
		rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)),
			      GFP_KERNEL);
		if (rta) {
			rta->rta_type = RTM_NEWQDISC;
			rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
			((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

			ret = q->ops->change(q, rta);
			kfree(rta);

			if (ret == 0)
				return q;
		}
		qdisc_destroy(q);
	}
	return NULL;
}

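/*
 * Parse and apply a new configuration.  TCA_RED_PARMS carries the
 * struct tc_red_qopt thresholds and flags; TCA_RED_STAB carries the
 * 256-byte lookup table used to scale the idle-time decay of qavg.
 */
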
static int red_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct rtattr *tb[TCA_RED_MAX];
	struct tc_red_qopt *ctl;
	struct Qdisc *child = NULL;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_RED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_RED_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_RED_PARMS-1]) < sizeof(*ctl) ||
	    tb[TCA_RED_STAB-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_RED_STAB-1]) < RED_STAB_SIZE)
		return -EINVAL;

	ctl = RTA_DATA(tb[TCA_RED_PARMS-1]);

	if (ctl->limit > 0) {
		child = red_create_dflt(sch->dev, ctl->limit);
		if (child == NULL)
			return -ENOMEM;
	}

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child)
		qdisc_destroy(xchg(&q->qdisc, child));

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      RTA_DATA(tb[TCA_RED_STAB-1]));

	if (skb_queue_empty(&sch->q))
		red_end_of_idle_period(&q->parms);

	sch_tree_unlock(sch);
	return 0;
}

static int red_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	return red_change(sch, opt);
}

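/*
 * Note on units: red_set_parms() stores qth_min/qth_max pre-shifted
 * left by Wlog so they compare directly against the scaled average;
 * dumping shifts them back so userspace sees the values it configured.
 */
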
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct rtattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = RTA_NEST(skb, TCA_OPTIONS);
	RTA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
	return RTA_NEST_END(skb, opts);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, opts);
}

static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct tc_red_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

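/*
 * Class interface: RED exposes exactly one pseudo-class (handle minor 1)
 * representing the child qdisc, so grafting works but classes cannot be
 * created or deleted.
 */
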
static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (cl != 1)
		return -ENOENT;
	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_reset(*old);
	sch->q.qlen = 0;
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long red_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_put(struct Qdisc *sch, unsigned long arg)
{
	return;
}

static int red_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct rtattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int red_delete(struct Qdisc *sch, unsigned long cl)
{
	return -ENOSYS;
}

static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **red_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.get		=	red_get,
	.put		=	red_put,
	.change		=	red_change_class,
	.delete		=	red_delete,
	.walk		=	red_walk,
	.tcf_chain	=	red_find_tcf,
	.dump		=	red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.requeue	=	red_requeue,
	.drop		=	red_drop,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");